You've already forked wine-staging
mirror of
https://gitlab.winehq.org/wine/wine-staging.git
synced 2025-12-15 08:03:15 -08:00
12756 lines
565 KiB
Diff
12756 lines
565 KiB
Diff
From 4b66dcfca6a93be2b57d76220fe63f58049bd5ef Mon Sep 17 00:00:00 2001
|
|
From: Alistair Leslie-Hughes <leslie_alistair@hotmail.com>
|
|
Date: Tue, 25 Nov 2025 12:56:39 +1100
|
|
Subject: [PATCH] Updated vkd3d to cd230078a7d15115f25e3ef1d1f88662c6f582bc.
|
|
|
|
---
|
|
libs/vkd3d/Makefile.in | 4 +-
|
|
libs/vkd3d/include/private/spirv_grammar.h | 561 +----
|
|
libs/vkd3d/include/private/vkd3d_common.h | 9 +-
|
|
libs/vkd3d/include/private/vkd3d_version.h | 2 +-
|
|
libs/vkd3d/include/vkd3d_shader.h | 19 +
|
|
libs/vkd3d/libs/vkd3d-common/blob.c | 1 +
|
|
libs/vkd3d/libs/vkd3d-shader/d3d_asm.c | 78 +-
|
|
libs/vkd3d/libs/vkd3d-shader/d3dbc.c | 157 +-
|
|
libs/vkd3d/libs/vkd3d-shader/dxbc.c | 13 +-
|
|
libs/vkd3d/libs/vkd3d-shader/dxil.c | 1707 ++++++++-------
|
|
libs/vkd3d/libs/vkd3d-shader/glsl.c | 24 +-
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl.c | 2 +
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl.y | 39 +-
|
|
libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c | 620 +++---
|
|
libs/vkd3d/libs/vkd3d-shader/ir.c | 1859 +++++++++++++----
|
|
libs/vkd3d/libs/vkd3d-shader/msl.c | 20 +-
|
|
libs/vkd3d/libs/vkd3d-shader/preproc.l | 1 +
|
|
libs/vkd3d/libs/vkd3d-shader/spirv.c | 471 ++---
|
|
libs/vkd3d/libs/vkd3d-shader/tpf.c | 281 ++-
|
|
.../libs/vkd3d-shader/vkd3d_shader_main.c | 51 +-
|
|
.../libs/vkd3d-shader/vkd3d_shader_private.h | 153 +-
|
|
libs/vkd3d/libs/vkd3d/command.c | 143 +-
|
|
libs/vkd3d/libs/vkd3d/device.c | 56 +-
|
|
libs/vkd3d/libs/vkd3d/resource.c | 38 +-
|
|
libs/vkd3d/libs/vkd3d/state.c | 24 +-
|
|
libs/vkd3d/libs/vkd3d/vkd3d_private.h | 24 +-
|
|
26 files changed, 3655 insertions(+), 2702 deletions(-)
|
|
|
|
diff --git a/libs/vkd3d/Makefile.in b/libs/vkd3d/Makefile.in
|
|
index 9ad9ed850b6..868f4582380 100644
|
|
--- a/libs/vkd3d/Makefile.in
|
|
+++ b/libs/vkd3d/Makefile.in
|
|
@@ -7,7 +7,6 @@ EXTRADEFS = \
|
|
-DLIBVKD3D_UTILS_SOURCE
|
|
|
|
SOURCES = \
|
|
- config.h \
|
|
libs/vkd3d-common/blob.c \
|
|
libs/vkd3d-common/debug.c \
|
|
libs/vkd3d-common/error.c \
|
|
@@ -40,4 +39,5 @@ SOURCES = \
|
|
libs/vkd3d/resource.c \
|
|
libs/vkd3d/state.c \
|
|
libs/vkd3d/utils.c \
|
|
- libs/vkd3d/vkd3d_main.c
|
|
+ libs/vkd3d/vkd3d_main.c \
|
|
+ config.h
|
|
diff --git a/libs/vkd3d/include/private/spirv_grammar.h b/libs/vkd3d/include/private/spirv_grammar.h
|
|
index 34cadd9bd58..2aac5a6558c 100644
|
|
--- a/libs/vkd3d/include/private/spirv_grammar.h
|
|
+++ b/libs/vkd3d/include/private/spirv_grammar.h
|
|
@@ -43,12 +43,10 @@ enum spirv_parser_operand_type
|
|
SPIRV_PARSER_OPERAND_TYPE_ADDRESSING_MODEL,
|
|
SPIRV_PARSER_OPERAND_TYPE_BUILT_IN,
|
|
SPIRV_PARSER_OPERAND_TYPE_CAPABILITY,
|
|
- SPIRV_PARSER_OPERAND_TYPE_COMPONENT_TYPE,
|
|
SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_LAYOUT,
|
|
SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS,
|
|
SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_REDUCE,
|
|
SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_USE,
|
|
- SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_VECTOR_MATRIX_LAYOUT,
|
|
SPIRV_PARSER_OPERAND_TYPE_DECORATION,
|
|
SPIRV_PARSER_OPERAND_TYPE_DIM,
|
|
SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODE,
|
|
@@ -84,7 +82,6 @@ enum spirv_parser_operand_type
|
|
SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING,
|
|
SPIRV_PARSER_OPERAND_TYPE_LOAD_CACHE_CONTROL,
|
|
SPIRV_PARSER_OPERAND_TYPE_LOOP_CONTROL,
|
|
- SPIRV_PARSER_OPERAND_TYPE_MATRIX_MULTIPLY_ACCUMULATE_OPERANDS,
|
|
SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS,
|
|
SPIRV_PARSER_OPERAND_TYPE_MEMORY_MODEL,
|
|
SPIRV_PARSER_OPERAND_TYPE_MEMORY_SEMANTICS,
|
|
@@ -149,7 +146,7 @@ spirv_parser_operand_type_info[] =
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_BUILT_IN] =
|
|
{
|
|
- "BuiltIn", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 126,
|
|
+ "BuiltIn", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 116,
|
|
(struct spirv_parser_enumerant[])
|
|
{
|
|
{0, "Position"},
|
|
@@ -210,9 +207,6 @@ spirv_parser_operand_type_info[] =
|
|
{0x1156, "DeviceIndex"},
|
|
{0x1158, "ViewIndex"},
|
|
{0x115c, "ShadingRateKHR"},
|
|
- {0x118c, "TileOffsetQCOM"},
|
|
- {0x118d, "TileDimensionQCOM"},
|
|
- {0x118e, "TileApronSizeQCOM"},
|
|
{0x1380, "BaryCoordNoPerspAMD"},
|
|
{0x1381, "BaryCoordNoPerspCentroidAMD"},
|
|
{0x1382, "BaryCoordNoPerspSampleAMD"},
|
|
@@ -264,25 +258,18 @@ spirv_parser_operand_type_info[] =
|
|
{0x14e0, "HitMicroTriangleVertexBarycentricsNV"},
|
|
{0x14e7, "IncomingRayFlagsKHR"},
|
|
{0x14e8, "RayGeometryIndexKHR"},
|
|
- {0x14ef, "HitIsSphereNV"},
|
|
- {0x14f0, "HitIsLSSNV"},
|
|
- {0x14f1, "HitSpherePositionNV"},
|
|
{0x14fe, "WarpsPerSMNV"},
|
|
{0x14ff, "SMCountNV"},
|
|
{0x1500, "WarpIDNV"},
|
|
{0x1501, "SMIDNV"},
|
|
- {0x1514, "HitLSSPositionsNV"},
|
|
{0x151d, "HitKindFrontFacingMicroTriangleNV"},
|
|
{0x151e, "HitKindBackFacingMicroTriangleNV"},
|
|
- {0x152c, "HitSphereRadiusNV"},
|
|
- {0x152d, "HitLSSRadiiNV"},
|
|
- {0x153c, "ClusterIDNV"},
|
|
{0x1785, "CullMaskKHR"},
|
|
}
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_CAPABILITY] =
|
|
{
|
|
- "Capability", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 261,
|
|
+ "Capability", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 245,
|
|
(struct spirv_parser_enumerant[])
|
|
{
|
|
{0, "Matrix"},
|
|
@@ -393,7 +380,6 @@ spirv_parser_operand_type_info[] =
|
|
{0x1184, "TextureSampleWeightedQCOM"},
|
|
{0x1185, "TextureBoxFilterQCOM"},
|
|
{0x1186, "TextureBlockMatchQCOM"},
|
|
- {0x118f, "TileShadingQCOM"},
|
|
{0x1192, "TextureBlockMatch2QCOM"},
|
|
{0x1390, "Float16ImageAMD"},
|
|
{0x1391, "ImageGatherBiasLodAMD"},
|
|
@@ -404,9 +390,6 @@ spirv_parser_operand_type_info[] =
|
|
{0x13bf, "ShaderClockKHR"},
|
|
{0x13cb, "ShaderEnqueueAMDX"},
|
|
{0x13df, "QuadControlKHR"},
|
|
- {0x13fc, "BFloat16TypeKHR"},
|
|
- {0x13fd, "BFloat16DotProductKHR"},
|
|
- {0x13fe, "BFloat16CooperativeMatrixKHR"},
|
|
{0x1481, "SampleMaskOverrideCoverageNV"},
|
|
{0x1483, "GeometryShaderPassthroughNV"},
|
|
{0x1486, "ShaderViewportIndexLayerEXT"},
|
|
@@ -452,19 +435,14 @@ spirv_parser_operand_type_info[] =
|
|
{0x1507, "ShaderInvocationReorderNV"},
|
|
{0x150e, "BindlessTextureNV"},
|
|
{0x150f, "RayQueryPositionFetchKHR"},
|
|
- {0x1512, "CooperativeVectorNV"},
|
|
{0x151c, "AtomicFloat16VectorNV"},
|
|
{0x1521, "RayTracingDisplacementMicromapNV"},
|
|
{0x1526, "RawAccessChainsNV"},
|
|
- {0x152a, "RayTracingSpheresGeometryNV"},
|
|
- {0x152b, "RayTracingLinearSweptSpheresGeometryNV"},
|
|
{0x1536, "CooperativeMatrixReductionsNV"},
|
|
{0x1537, "CooperativeMatrixConversionsNV"},
|
|
{0x1538, "CooperativeMatrixPerElementOperationsNV"},
|
|
{0x1539, "CooperativeMatrixTensorAddressingNV"},
|
|
{0x153a, "CooperativeMatrixBlockLoadsNV"},
|
|
- {0x153b, "CooperativeVectorTrainingNV"},
|
|
- {0x153d, "RayTracingClusterAccelerationStructureNV"},
|
|
{0x153f, "TensorAddressingNV"},
|
|
{0x15c0, "SubgroupShuffleINTEL"},
|
|
{0x15c1, "SubgroupBufferBlockIOINTEL"},
|
|
@@ -529,47 +507,18 @@ spirv_parser_operand_type_info[] =
|
|
{0x1800, "ArithmeticFenceEXT"},
|
|
{0x1806, "FPGAClusterAttributesV2INTEL"},
|
|
{0x1811, "FPGAKernelAttributesv2INTEL"},
|
|
- {0x1812, "TaskSequenceINTEL"},
|
|
{0x1819, "FPMaxErrorINTEL"},
|
|
{0x181b, "FPGALatencyControlINTEL"},
|
|
{0x181e, "FPGAArgumentInterfacesINTEL"},
|
|
{0x182b, "GlobalVariableHostAccessINTEL"},
|
|
{0x182d, "GlobalVariableFPGADecorationsINTEL"},
|
|
{0x184c, "SubgroupBufferPrefetchINTEL"},
|
|
- {0x1854, "Subgroup2DBlockIOINTEL"},
|
|
- {0x1855, "Subgroup2DBlockTransformINTEL"},
|
|
- {0x1856, "Subgroup2DBlockTransposeINTEL"},
|
|
- {0x185c, "SubgroupMatrixMultiplyAccumulateINTEL"},
|
|
- {0x1861, "TernaryBitwiseFunctionINTEL"},
|
|
{0x1900, "GroupUniformArithmeticKHR"},
|
|
- {0x1919, "TensorFloat32RoundingINTEL"},
|
|
{0x191b, "MaskedGatherScatterINTEL"},
|
|
{0x1929, "CacheControlsINTEL"},
|
|
{0x193c, "RegisterLimitsINTEL"},
|
|
}
|
|
},
|
|
- [SPIRV_PARSER_OPERAND_TYPE_COMPONENT_TYPE] =
|
|
- {
|
|
- "ComponentType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 15,
|
|
- (struct spirv_parser_enumerant[])
|
|
- {
|
|
- {0, "Float16NV"},
|
|
- {0x1, "Float32NV"},
|
|
- {0x2, "Float64NV"},
|
|
- {0x3, "SignedInt8NV"},
|
|
- {0x4, "SignedInt16NV"},
|
|
- {0x5, "SignedInt32NV"},
|
|
- {0x6, "SignedInt64NV"},
|
|
- {0x7, "UnsignedInt8NV"},
|
|
- {0x8, "UnsignedInt16NV"},
|
|
- {0x9, "UnsignedInt32NV"},
|
|
- {0xa, "UnsignedInt64NV"},
|
|
- {0x3ba247f8, "SignedInt8PackedNV"},
|
|
- {0x3ba247f9, "UnsignedInt8PackedNV"},
|
|
- {0x3ba247fa, "FloatE4M3NV"},
|
|
- {0x3ba247fb, "FloatE5M2NV"},
|
|
- }
|
|
- },
|
|
[SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_LAYOUT] =
|
|
{
|
|
"CooperativeMatrixLayout", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
|
|
@@ -614,17 +563,6 @@ spirv_parser_operand_type_info[] =
|
|
{0x2, "MatrixAccumulatorKHR"},
|
|
}
|
|
},
|
|
- [SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_VECTOR_MATRIX_LAYOUT] =
|
|
- {
|
|
- "CooperativeVectorMatrixLayout", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 4,
|
|
- (struct spirv_parser_enumerant[])
|
|
- {
|
|
- {0, "RowMajorNV"},
|
|
- {0x1, "ColumnMajorNV"},
|
|
- {0x2, "InferencingOptimalNV"},
|
|
- {0x3, "TrainingOptimalNV"},
|
|
- }
|
|
- },
|
|
[SPIRV_PARSER_OPERAND_TYPE_DECORATION] =
|
|
{
|
|
"Decoration", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 142,
|
|
@@ -1240,7 +1178,7 @@ spirv_parser_operand_type_info[] =
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_EXECUTION_MODE] =
|
|
{
|
|
- "ExecutionMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 96,
|
|
+ "ExecutionMode", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 94,
|
|
(struct spirv_parser_enumerant[])
|
|
{
|
|
{
|
|
@@ -1389,16 +1327,6 @@ spirv_parser_operand_type_info[] =
|
|
SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
|
|
}
|
|
},
|
|
- {0x1189, "NonCoherentTileAttachmentReadQCOM"},
|
|
- {
|
|
- 0x118a, "TileShadingRateQCOM", 3,
|
|
- (enum spirv_parser_operand_type[])
|
|
- {
|
|
- SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
|
|
- SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
|
|
- SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER,
|
|
- }
|
|
- },
|
|
{0x1399, "EarlyAndLateFragmentTestsAMD"},
|
|
{0x13a3, "StencilRefReplacingEXT"},
|
|
{0x13cd, "CoalescingAMDX"},
|
|
@@ -1628,11 +1556,7 @@ spirv_parser_operand_type_info[] =
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_FPENCODING] =
|
|
{
|
|
- "FPEncoding", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 1,
|
|
- (struct spirv_parser_enumerant[])
|
|
- {
|
|
- {0, "BFloat16KHR"},
|
|
- }
|
|
+ "FPEncoding", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_FPFAST_MATH_MODE] =
|
|
{
|
|
@@ -1757,7 +1681,7 @@ spirv_parser_operand_type_info[] =
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_IMAGE_CHANNEL_DATA_TYPE] =
|
|
{
|
|
- "ImageChannelDataType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 26,
|
|
+ "ImageChannelDataType", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 20,
|
|
(struct spirv_parser_enumerant[])
|
|
{
|
|
{0, "SnormInt8"},
|
|
@@ -1777,15 +1701,9 @@ spirv_parser_operand_type_info[] =
|
|
{0xe, "Float"},
|
|
{0xf, "UnormInt24"},
|
|
{0x10, "UnormInt101010_2"},
|
|
- {0x11, "UnormInt10X6EXT"},
|
|
{0x13, "UnsignedIntRaw10EXT"},
|
|
{0x14, "UnsignedIntRaw12EXT"},
|
|
{0x15, "UnormInt2_101010EXT"},
|
|
- {0x16, "UnsignedInt10X6EXT"},
|
|
- {0x17, "UnsignedInt12X4EXT"},
|
|
- {0x18, "UnsignedInt14X2EXT"},
|
|
- {0x19, "UnormInt12X4EXT"},
|
|
- {0x1a, "UnormInt14X2EXT"},
|
|
}
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_IMAGE_CHANNEL_ORDER] =
|
|
@@ -2146,28 +2064,6 @@ spirv_parser_operand_type_info[] =
|
|
},
|
|
}
|
|
},
|
|
- [SPIRV_PARSER_OPERAND_TYPE_MATRIX_MULTIPLY_ACCUMULATE_OPERANDS] =
|
|
- {
|
|
- "MatrixMultiplyAccumulateOperands", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 15,
|
|
- (struct spirv_parser_enumerant[])
|
|
- {
|
|
- {0, "None"},
|
|
- {0x1, "MatrixASignedComponentsINTEL"},
|
|
- {0x2, "MatrixBSignedComponentsINTEL"},
|
|
- {0x4, "MatrixCBFloat16INTEL"},
|
|
- {0x8, "MatrixResultBFloat16INTEL"},
|
|
- {0x10, "MatrixAPackedInt8INTEL"},
|
|
- {0x20, "MatrixBPackedInt8INTEL"},
|
|
- {0x40, "MatrixAPackedInt4INTEL"},
|
|
- {0x80, "MatrixBPackedInt4INTEL"},
|
|
- {0x100, "MatrixATF32INTEL"},
|
|
- {0x200, "MatrixBTF32INTEL"},
|
|
- {0x400, "MatrixAPackedFloat16INTEL"},
|
|
- {0x800, "MatrixBPackedFloat16INTEL"},
|
|
- {0x1000, "MatrixAPackedBFloat16INTEL"},
|
|
- {0x2000, "MatrixBPackedBFloat16INTEL"},
|
|
- }
|
|
- },
|
|
[SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS] =
|
|
{
|
|
"MemoryAccess", SPIRV_PARSER_OPERAND_CATEGORY_BIT_ENUM, 9,
|
|
@@ -2405,7 +2301,7 @@ spirv_parser_operand_type_info[] =
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_SOURCE_LANGUAGE] =
|
|
{
|
|
- "SourceLanguage", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 14,
|
|
+ "SourceLanguage", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 13,
|
|
(struct spirv_parser_enumerant[])
|
|
{
|
|
{0, "Unknown"},
|
|
@@ -2421,12 +2317,11 @@ spirv_parser_operand_type_info[] =
|
|
{0xa, "WGSL"},
|
|
{0xb, "Slang"},
|
|
{0xc, "Zig"},
|
|
- {0xd, "Rust"},
|
|
}
|
|
},
|
|
[SPIRV_PARSER_OPERAND_TYPE_STORAGE_CLASS] =
|
|
{
|
|
- "StorageClass", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 28,
|
|
+ "StorageClass", SPIRV_PARSER_OPERAND_CATEGORY_VALUE_ENUM, 27,
|
|
(struct spirv_parser_enumerant[])
|
|
{
|
|
{0, "UniformConstant"},
|
|
@@ -2443,7 +2338,6 @@ spirv_parser_operand_type_info[] =
|
|
{0xb, "Image"},
|
|
{0xc, "StorageBuffer"},
|
|
{0x104c, "TileImageEXT"},
|
|
- {0x118b, "TileAttachmentQCOM"},
|
|
{0x13cc, "NodePayloadAMDX"},
|
|
{0x14d0, "CallableDataKHR"},
|
|
{0x14d1, "IncomingCallableDataKHR"},
|
|
@@ -6996,78 +6890,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_IMAGE_OPERANDS, '?'},
|
|
}
|
|
},
|
|
- {
|
|
- 0x14a8, "OpTypeCooperativeVectorNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x14a9, "OpCooperativeVectorMatrixMulNV", 13,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS, '?'},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x14aa, "OpCooperativeVectorOuterProductAccumulateNV", 7,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x14ab, "OpCooperativeVectorReduceSumAccumulateNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x14ac, "OpCooperativeVectorMatrixMulAddNV", 16,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '?'},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS, '?'},
|
|
- }
|
|
- },
|
|
{
|
|
0x14ad, "OpCooperativeMatrixConvertNV", 3,
|
|
(struct spirv_parser_instruction_operand[])
|
|
@@ -7138,27 +6960,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
}
|
|
},
|
|
- {
|
|
- 0x14b6, "OpCooperativeVectorLoadNV", 5,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x14b7, "OpCooperativeVectorStoreNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
|
|
- }
|
|
- },
|
|
{
|
|
0x14d6, "OpReportIntersectionKHR", 4,
|
|
(struct spirv_parser_instruction_operand[])
|
|
@@ -7249,25 +7050,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
}
|
|
},
|
|
- {
|
|
- 0x14e1, "OpRayQueryGetClusterIdNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x14e2, "OpHitObjectGetClusterIdNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
{
|
|
0x14ee, "OpTypeCooperativeMatrixNV", 5,
|
|
(struct spirv_parser_instruction_operand[])
|
|
@@ -7580,130 +7362,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_RAW_ACCESS_CHAIN_OPERANDS, '?'},
|
|
}
|
|
},
|
|
- {
|
|
- 0x1533, "OpRayQueryGetIntersectionSpherePositionNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1534, "OpRayQueryGetIntersectionSphereRadiusNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1535, "OpRayQueryGetIntersectionLSSPositionsNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1536, "OpRayQueryGetIntersectionLSSRadiiNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1537, "OpRayQueryGetIntersectionLSSHitValueNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1538, "OpHitObjectGetSpherePositionNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1539, "OpHitObjectGetSphereRadiusNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x153a, "OpHitObjectGetLSSPositionsNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x153b, "OpHitObjectGetLSSRadiiNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x153c, "OpHitObjectIsSphereHitNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x153d, "OpHitObjectIsLSSHitNV", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x153e, "OpRayQueryIsSphereHitNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x153f, "OpRayQueryIsLSSHitNV", 4,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
{
|
|
0x15c3, "OpSubgroupShuffleINTEL", 4,
|
|
(struct spirv_parser_instruction_operand[])
|
|
@@ -7962,9 +7620,10 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x15e9, "OpAsmTargetINTEL", 2,
|
|
+ 0x15e9, "OpAsmTargetINTEL", 3,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_STRING},
|
|
}
|
|
@@ -9237,7 +8896,7 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x16d0, "OpArbitraryFloatSinCosPiINTEL", 8,
|
|
+ 0x16d0, "OpArbitraryFloatSinCosPiINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
@@ -9248,6 +8907,7 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
}
|
|
},
|
|
{
|
|
@@ -9279,7 +8939,7 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x16d3, "OpArbitraryFloatCastToIntINTEL", 8,
|
|
+ 0x16d3, "OpArbitraryFloatCastToIntINTEL", 7,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
@@ -9289,7 +8949,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
}
|
|
},
|
|
{
|
|
@@ -9803,7 +9462,7 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x16fa, "OpArbitraryFloatPowNINTEL", 10,
|
|
+ 0x16fa, "OpArbitraryFloatPowNINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
@@ -9815,7 +9474,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
}
|
|
},
|
|
{
|
|
@@ -9851,12 +9509,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1723, "OpFixedSqrtINTEL", 8,
|
|
+ 0x1723, "OpFixedSqrtINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9865,12 +9524,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1724, "OpFixedRecipINTEL", 8,
|
|
+ 0x1724, "OpFixedRecipINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9879,12 +9539,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1725, "OpFixedRsqrtINTEL", 8,
|
|
+ 0x1725, "OpFixedRsqrtINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9893,12 +9554,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1726, "OpFixedSinINTEL", 8,
|
|
+ 0x1726, "OpFixedSinINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9907,12 +9569,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1727, "OpFixedCosINTEL", 8,
|
|
+ 0x1727, "OpFixedCosINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9921,12 +9584,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1728, "OpFixedSinCosINTEL", 8,
|
|
+ 0x1728, "OpFixedSinCosINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9935,12 +9599,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x1729, "OpFixedSinPiINTEL", 8,
|
|
+ 0x1729, "OpFixedSinPiINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9949,12 +9614,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x172a, "OpFixedCosPiINTEL", 8,
|
|
+ 0x172a, "OpFixedCosPiINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9963,12 +9629,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x172b, "OpFixedSinCosPiINTEL", 8,
|
|
+ 0x172b, "OpFixedSinCosPiINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9977,12 +9644,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x172c, "OpFixedLogINTEL", 8,
|
|
+ 0x172c, "OpFixedLogINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -9991,12 +9659,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x172d, "OpFixedExpINTEL", 8,
|
|
+ 0x172d, "OpFixedExpINTEL", 9,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
{SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
@@ -10043,12 +9712,13 @@ spirv_parser_opcode_info[] =
|
|
}
|
|
},
|
|
{
|
|
- 0x173d, "OpFPGARegINTEL", 3,
|
|
+ 0x173d, "OpFPGARegINTEL", 4,
|
|
(struct spirv_parser_instruction_operand[])
|
|
{
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
+ {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
}
|
|
},
|
|
{
|
|
@@ -10311,50 +9981,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
}
|
|
},
|
|
- {
|
|
- 0x1813, "OpTaskSequenceCreateINTEL", 7,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_LITERAL_INTEGER},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1814, "OpTaskSequenceAsyncINTEL", 2,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF, '*'},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1815, "OpTaskSequenceGetINTEL", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1816, "OpTaskSequenceReleaseINTEL", 1,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1837, "OpTypeTaskSequenceINTEL", 1,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- }
|
|
- },
|
|
{
|
|
0x184d, "OpSubgroupBlockPrefetchINTEL", 3,
|
|
(struct spirv_parser_instruction_operand[])
|
|
@@ -10364,110 +9990,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_MEMORY_ACCESS, '?'},
|
|
}
|
|
},
|
|
- {
|
|
- 0x1857, "OpSubgroup2DBlockLoadINTEL", 10,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1858, "OpSubgroup2DBlockLoadTransformINTEL", 10,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1859, "OpSubgroup2DBlockLoadTransposeINTEL", 10,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x185a, "OpSubgroup2DBlockPrefetchINTEL", 9,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x185b, "OpSubgroup2DBlockStoreINTEL", 10,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x185d, "OpSubgroupMatrixMultiplyAccumulateINTEL", 7,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_MATRIX_MULTIPLY_ACCUMULATE_OPERANDS, '?'},
|
|
- }
|
|
- },
|
|
- {
|
|
- 0x1862, "OpBitwiseFunctionINTEL", 6,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
{
|
|
0x1901, "OpGroupIMulKHR", 5,
|
|
(struct spirv_parser_instruction_operand[])
|
|
@@ -10556,15 +10078,6 @@ spirv_parser_opcode_info[] =
|
|
{SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
}
|
|
},
|
|
- {
|
|
- 0x191a, "OpRoundFToTF32INTEL", 3,
|
|
- (struct spirv_parser_instruction_operand[])
|
|
- {
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT_TYPE},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_RESULT},
|
|
- {SPIRV_PARSER_OPERAND_TYPE_ID_REF},
|
|
- }
|
|
- },
|
|
{
|
|
0x191c, "OpMaskedGatherINTEL", 6,
|
|
(struct spirv_parser_instruction_operand[])
|
|
diff --git a/libs/vkd3d/include/private/vkd3d_common.h b/libs/vkd3d/include/private/vkd3d_common.h
|
|
index 7ee11b54396..60d1665cc85 100644
|
|
--- a/libs/vkd3d/include/private/vkd3d_common.h
|
|
+++ b/libs/vkd3d/include/private/vkd3d_common.h
|
|
@@ -285,7 +285,7 @@ static inline unsigned int vkd3d_popcount(unsigned int v)
|
|
{
|
|
#ifdef _MSC_VER
|
|
return __popcnt(v);
|
|
-#elif defined(__MINGW32__)
|
|
+#elif defined(HAVE_BUILTIN_POPCOUNT)
|
|
return __builtin_popcount(v);
|
|
#else
|
|
v -= (v >> 1) & 0x55555555;
|
|
@@ -349,12 +349,7 @@ static inline unsigned int vkd3d_log2i(unsigned int x)
|
|
|
|
static inline unsigned int vkd3d_ctz(uint32_t v)
|
|
{
|
|
-#ifdef _WIN32
|
|
- ULONG result;
|
|
- if (_BitScanForward(&result, v))
|
|
- return (unsigned int)result;
|
|
- return 32;
|
|
-#elif defined(HAVE_BUILTIN_CTZ)
|
|
+#ifdef HAVE_BUILTIN_CTZ
|
|
return __builtin_ctz(v);
|
|
#else
|
|
unsigned int c = 31;
|
|
diff --git a/libs/vkd3d/include/private/vkd3d_version.h b/libs/vkd3d/include/private/vkd3d_version.h
|
|
index 0edc4428022..687751d6a5f 100644
|
|
--- a/libs/vkd3d/include/private/vkd3d_version.h
|
|
+++ b/libs/vkd3d/include/private/vkd3d_version.h
|
|
@@ -1 +1 @@
|
|
-#define VKD3D_VCS_ID " (Wine bundled)"
|
|
+#define VKD3D_VCS_ID " (git a8ca1f95)"
|
|
diff --git a/libs/vkd3d/include/vkd3d_shader.h b/libs/vkd3d/include/vkd3d_shader.h
|
|
index 352c222f27d..cc6cf4001a7 100644
|
|
--- a/libs/vkd3d/include/vkd3d_shader.h
|
|
+++ b/libs/vkd3d/include/vkd3d_shader.h
|
|
@@ -1039,6 +1039,25 @@ enum vkd3d_shader_parameter_name
|
|
VKD3D_SHADER_PARAMETER_NAME_BUMP_LUMINANCE_OFFSET_3,
|
|
VKD3D_SHADER_PARAMETER_NAME_BUMP_LUMINANCE_OFFSET_4,
|
|
VKD3D_SHADER_PARAMETER_NAME_BUMP_LUMINANCE_OFFSET_5,
|
|
+ /**
|
|
+ * A mask of projected textures.
|
|
+ *
|
|
+ * When this parameter is provided to a shader model 1.0-1.3 pixel shader,
|
|
+ * for each nonzero bit of this mask, the corresponding texture will be
|
|
+ * projected. That is, it will have its coordinates divided by their W
|
|
+ * component before sampling.
|
|
+ *
|
|
+ * The default value is zero, i.e. no textures are projected.
|
|
+ *
|
|
+ * The data type for this parameter must be
|
|
+ * VKD3D_SHADER_PARAMETER_DATA_TYPE_UINT32.
|
|
+ *
|
|
+ * Only VKD3D_SHADER_PARAMETER_TYPE_IMMEDIATE_CONSTANT is supported in this
|
|
+ * version of vkd3d-shader.
|
|
+ *
|
|
+ * \since 1.19
|
|
+ */
|
|
+ VKD3D_SHADER_PARAMETER_NAME_PROJECTED_TEXTURE_MASK,
|
|
|
|
VKD3D_FORCE_32_BIT_ENUM(VKD3D_SHADER_PARAMETER_NAME),
|
|
};
|
|
diff --git a/libs/vkd3d/libs/vkd3d-common/blob.c b/libs/vkd3d/libs/vkd3d-common/blob.c
|
|
index f60ef7db769..c2c6ad67804 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-common/blob.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-common/blob.c
|
|
@@ -20,6 +20,7 @@
|
|
#define WIDL_C_INLINE_WRAPPERS
|
|
#endif
|
|
#define COBJMACROS
|
|
+
|
|
#define CONST_VTABLE
|
|
#include "vkd3d.h"
|
|
#include "vkd3d_blob.h"
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
|
|
index b316f6c8830..339225d00e2 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/d3d_asm.c
|
|
@@ -563,8 +563,8 @@ static void shader_print_dcl_usage(struct vkd3d_d3d_asm_compiler *compiler,
|
|
vkd3d_string_buffer_printf(buffer, "%s%s%s", prefix, usage, suffix);
|
|
}
|
|
|
|
-static void shader_print_src_param(struct vkd3d_d3d_asm_compiler *compiler,
|
|
- const char *prefix, const struct vkd3d_shader_src_param *param, const char *suffix);
|
|
+static void shader_print_src_operand(struct vkd3d_d3d_asm_compiler *compiler,
|
|
+ const char *prefix, const struct vsir_src_operand *src, const char *suffix);
|
|
|
|
static void shader_print_float_literal(struct vkd3d_d3d_asm_compiler *compiler,
|
|
const char *prefix, float f, const char *suffix)
|
|
@@ -672,10 +672,10 @@ static void shader_print_untyped_literal(struct vkd3d_d3d_asm_compiler *compiler
|
|
}
|
|
|
|
static void shader_print_subscript(struct vkd3d_d3d_asm_compiler *compiler,
|
|
- unsigned int offset, const struct vkd3d_shader_src_param *rel_addr)
|
|
+ unsigned int offset, const struct vsir_src_operand *rel_addr)
|
|
{
|
|
if (rel_addr)
|
|
- shader_print_src_param(compiler, "[", rel_addr, " + ");
|
|
+ shader_print_src_operand(compiler, "[", rel_addr, " + ");
|
|
shader_print_uint_literal(compiler, rel_addr ? "" : "[", offset, "]");
|
|
}
|
|
|
|
@@ -1041,32 +1041,32 @@ static void shader_print_write_mask(struct vkd3d_d3d_asm_compiler *compiler,
|
|
compiler->colours.write_mask, buffer, compiler->colours.reset, suffix);
|
|
}
|
|
|
|
-static void shader_print_dst_param(struct vkd3d_d3d_asm_compiler *compiler,
|
|
- const char *prefix, const struct vkd3d_shader_dst_param *param, bool is_declaration, const char *suffix)
|
|
+static void shader_print_dst_operand(struct vkd3d_d3d_asm_compiler *compiler,
|
|
+ const char *prefix, const struct vsir_dst_operand *dst, bool is_declaration, const char *suffix)
|
|
{
|
|
- uint32_t write_mask = param->write_mask;
|
|
+ uint32_t write_mask = dst->write_mask;
|
|
|
|
- shader_print_register(compiler, prefix, ¶m->reg, is_declaration, "");
|
|
+ shader_print_register(compiler, prefix, &dst->reg, is_declaration, "");
|
|
|
|
- if (write_mask && param->reg.dimension == VSIR_DIMENSION_VEC4)
|
|
+ if (write_mask && dst->reg.dimension == VSIR_DIMENSION_VEC4)
|
|
{
|
|
- if (data_type_is_64_bit(param->reg.data_type))
|
|
+ if (data_type_is_64_bit(dst->reg.data_type))
|
|
write_mask = vsir_write_mask_32_from_64(write_mask);
|
|
|
|
shader_print_write_mask(compiler, "", write_mask, "");
|
|
}
|
|
|
|
- shader_print_precision(compiler, ¶m->reg);
|
|
- shader_print_non_uniform(compiler, ¶m->reg);
|
|
- shader_print_reg_type(compiler, "", ¶m->reg, suffix);
|
|
+ shader_print_precision(compiler, &dst->reg);
|
|
+ shader_print_non_uniform(compiler, &dst->reg);
|
|
+ shader_print_reg_type(compiler, "", &dst->reg, suffix);
|
|
}
|
|
|
|
-static void shader_print_src_param(struct vkd3d_d3d_asm_compiler *compiler,
|
|
- const char *prefix, const struct vkd3d_shader_src_param *param, const char *suffix)
|
|
+static void shader_print_src_operand(struct vkd3d_d3d_asm_compiler *compiler,
|
|
+ const char *prefix, const struct vsir_src_operand *src, const char *suffix)
|
|
{
|
|
- enum vkd3d_shader_src_modifier src_modifier = param->modifiers;
|
|
+ enum vkd3d_shader_src_modifier src_modifier = src->modifiers;
|
|
struct vkd3d_string_buffer *buffer = &compiler->buffer;
|
|
- uint32_t swizzle = param->swizzle;
|
|
+ uint32_t swizzle = src->swizzle;
|
|
const char *modifier = "";
|
|
bool is_abs = false;
|
|
|
|
@@ -1085,7 +1085,7 @@ static void shader_print_src_param(struct vkd3d_d3d_asm_compiler *compiler,
|
|
if (src_modifier == VKD3DSPSM_ABS || src_modifier == VKD3DSPSM_ABSNEG)
|
|
is_abs = true;
|
|
|
|
- shader_print_register(compiler, is_abs ? "|" : "", ¶m->reg, false, "");
|
|
+ shader_print_register(compiler, is_abs ? "|" : "", &src->reg, false, "");
|
|
|
|
switch (src_modifier)
|
|
{
|
|
@@ -1120,14 +1120,14 @@ static void shader_print_src_param(struct vkd3d_d3d_asm_compiler *compiler,
|
|
break;
|
|
}
|
|
|
|
- if (param->reg.type != VKD3DSPR_IMMCONST && param->reg.type != VKD3DSPR_IMMCONST64
|
|
- && param->reg.dimension == VSIR_DIMENSION_VEC4)
|
|
+ if (src->reg.type != VKD3DSPR_IMMCONST && src->reg.type != VKD3DSPR_IMMCONST64
|
|
+ && src->reg.dimension == VSIR_DIMENSION_VEC4)
|
|
{
|
|
static const char swizzle_chars[] = "xyzw";
|
|
|
|
unsigned int swizzle_x, swizzle_y, swizzle_z, swizzle_w;
|
|
|
|
- if (data_type_is_64_bit(param->reg.data_type))
|
|
+ if (data_type_is_64_bit(src->reg.data_type))
|
|
swizzle = vsir_swizzle_32_from_64(swizzle);
|
|
|
|
swizzle_x = vsir_swizzle_get_component(swizzle, 0);
|
|
@@ -1147,13 +1147,13 @@ static void shader_print_src_param(struct vkd3d_d3d_asm_compiler *compiler,
|
|
if (is_abs)
|
|
vkd3d_string_buffer_printf(buffer, "|");
|
|
|
|
- shader_print_precision(compiler, ¶m->reg);
|
|
- shader_print_non_uniform(compiler, ¶m->reg);
|
|
- shader_print_reg_type(compiler, "", ¶m->reg, suffix);
|
|
+ shader_print_precision(compiler, &src->reg);
|
|
+ shader_print_non_uniform(compiler, &src->reg);
|
|
+ shader_print_reg_type(compiler, "", &src->reg, suffix);
|
|
}
|
|
|
|
static void shader_dump_ins_modifiers(struct vkd3d_d3d_asm_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst)
|
|
+ const struct vsir_dst_operand *dst)
|
|
{
|
|
struct vkd3d_string_buffer *buffer = &compiler->buffer;
|
|
uint32_t mmask = dst->modifiers;
|
|
@@ -1508,7 +1508,7 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
compiler->current = ins;
|
|
|
|
if (ins->predicate)
|
|
- shader_print_src_param(compiler, "(", ins->predicate, ") ");
|
|
+ shader_print_src_operand(compiler, "(", ins->predicate, ") ");
|
|
|
|
/* PixWin marks instructions with the coissue flag with a '+' */
|
|
if (ins->coissue)
|
|
@@ -1561,7 +1561,7 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
break;
|
|
|
|
case VSIR_OP_DCL_INDEX_RANGE:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.index_range.dst, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.index_range.dst, true, "");
|
|
shader_print_uint_literal(compiler, " ", ins->declaration.index_range.register_count, "");
|
|
break;
|
|
|
|
@@ -1579,7 +1579,7 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
|
|
case VSIR_OP_DCL_INPUT_PS:
|
|
shader_print_interpolation_mode(compiler, " ", ins->flags, "");
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.dst, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.dst, true, "");
|
|
break;
|
|
|
|
case VSIR_OP_DCL_INPUT_PS_SGV:
|
|
@@ -1587,19 +1587,19 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
case VSIR_OP_DCL_INPUT_SIV:
|
|
case VSIR_OP_DCL_OUTPUT_SGV:
|
|
case VSIR_OP_DCL_OUTPUT_SIV:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.register_semantic.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.register_semantic.reg, true, "");
|
|
shader_print_input_sysval_semantic(compiler, ", ", ins->declaration.register_semantic.sysval_semantic, "");
|
|
break;
|
|
|
|
case VSIR_OP_DCL_INPUT_PS_SIV:
|
|
shader_print_interpolation_mode(compiler, " ", ins->flags, "");
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.register_semantic.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.register_semantic.reg, true, "");
|
|
shader_print_input_sysval_semantic(compiler, ", ", ins->declaration.register_semantic.sysval_semantic, "");
|
|
break;
|
|
|
|
case VSIR_OP_DCL_INPUT:
|
|
case VSIR_OP_DCL_OUTPUT:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.dst, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.dst, true, "");
|
|
break;
|
|
|
|
case VSIR_OP_DCL_INPUT_PRIMITIVE:
|
|
@@ -1615,12 +1615,12 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
break;
|
|
|
|
case VSIR_OP_DCL_RESOURCE_RAW:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.raw_resource.resource.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.raw_resource.resource.reg, true, "");
|
|
shader_dump_register_space(compiler, ins->declaration.raw_resource.resource.range.space);
|
|
break;
|
|
|
|
case VSIR_OP_DCL_RESOURCE_STRUCTURED:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.structured_resource.resource.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.structured_resource.resource.reg, true, "");
|
|
shader_print_uint_literal(compiler, ", ", ins->declaration.structured_resource.byte_stride, "");
|
|
shader_dump_register_space(compiler, ins->declaration.structured_resource.resource.range.space);
|
|
break;
|
|
@@ -1654,12 +1654,12 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
break;
|
|
|
|
case VSIR_OP_DCL_TGSM_RAW:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.tgsm_raw.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.tgsm_raw.reg, true, "");
|
|
shader_print_uint_literal(compiler, ", ", ins->declaration.tgsm_raw.byte_count, "");
|
|
break;
|
|
|
|
case VSIR_OP_DCL_TGSM_STRUCTURED:
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.tgsm_structured.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.tgsm_structured.reg, true, "");
|
|
shader_print_uint_literal(compiler, ", ", ins->declaration.tgsm_structured.byte_stride, "");
|
|
shader_print_uint_literal(compiler, ", ", ins->declaration.tgsm_structured.structure_count, "");
|
|
break;
|
|
@@ -1672,13 +1672,13 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
|
|
case VSIR_OP_DCL_UAV_RAW:
|
|
shader_dump_uav_flags(compiler, ins->flags);
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.raw_resource.resource.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.raw_resource.resource.reg, true, "");
|
|
shader_dump_register_space(compiler, ins->declaration.raw_resource.resource.range.space);
|
|
break;
|
|
|
|
case VSIR_OP_DCL_UAV_STRUCTURED:
|
|
shader_dump_uav_flags(compiler, ins->flags);
|
|
- shader_print_dst_param(compiler, " ", &ins->declaration.structured_resource.resource.reg, true, "");
|
|
+ shader_print_dst_operand(compiler, " ", &ins->declaration.structured_resource.resource.reg, true, "");
|
|
shader_print_uint_literal(compiler, ", ", ins->declaration.structured_resource.byte_stride, "");
|
|
shader_dump_register_space(compiler, ins->declaration.structured_resource.resource.range.space);
|
|
break;
|
|
@@ -1744,13 +1744,13 @@ static void shader_dump_instruction(struct vkd3d_d3d_asm_compiler *compiler,
|
|
for (i = 0; i < ins->dst_count; ++i)
|
|
{
|
|
shader_dump_ins_modifiers(compiler, &ins->dst[i]);
|
|
- shader_print_dst_param(compiler, !i ? " " : ", ", &ins->dst[i], false, "");
|
|
+ shader_print_dst_operand(compiler, !i ? " " : ", ", &ins->dst[i], false, "");
|
|
}
|
|
|
|
/* Other source tokens */
|
|
for (i = ins->dst_count; i < (ins->dst_count + ins->src_count); ++i)
|
|
{
|
|
- shader_print_src_param(compiler, !i ? " " : ", ", &ins->src[i - ins->dst_count], "");
|
|
+ shader_print_src_operand(compiler, !i ? " " : ", ", &ins->src[i - ins->dst_count], "");
|
|
}
|
|
break;
|
|
}
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
|
|
index 87a7d48acca..3d7c36d9319 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/d3dbc.c
|
|
@@ -561,7 +561,7 @@ static enum vkd3d_shader_register_type parse_register_type(
|
|
}
|
|
|
|
static void d3dbc_parse_register(struct vkd3d_shader_sm1_parser *d3dbc,
|
|
- struct vkd3d_shader_register *reg, uint32_t param, struct vkd3d_shader_src_param *rel_addr)
|
|
+ struct vkd3d_shader_register *reg, uint32_t param, struct vsir_src_operand *rel_addr)
|
|
{
|
|
enum vkd3d_shader_register_type reg_type;
|
|
unsigned int index_offset, idx_count;
|
|
@@ -584,18 +584,18 @@ static void d3dbc_parse_register(struct vkd3d_shader_sm1_parser *d3dbc,
|
|
reg->dimension = VSIR_DIMENSION_VEC4;
|
|
}
|
|
|
|
-static void shader_sm1_parse_src_param(struct vkd3d_shader_sm1_parser *sm1, uint32_t param,
|
|
- struct vkd3d_shader_src_param *rel_addr, struct vkd3d_shader_src_param *src)
|
|
+static void d3dbc_parse_src_operand(struct vkd3d_shader_sm1_parser *d3dbc,
|
|
+ uint32_t param, struct vsir_src_operand *rel_addr, struct vsir_src_operand *src)
|
|
{
|
|
- d3dbc_parse_register(sm1, &src->reg, param, rel_addr);
|
|
+ d3dbc_parse_register(d3dbc, &src->reg, param, rel_addr);
|
|
src->swizzle = swizzle_from_sm1((param & VKD3D_SM1_SWIZZLE_MASK) >> VKD3D_SM1_SWIZZLE_SHIFT);
|
|
src->modifiers = (param & VKD3D_SM1_SRC_MODIFIER_MASK) >> VKD3D_SM1_SRC_MODIFIER_SHIFT;
|
|
}
|
|
|
|
-static void shader_sm1_parse_dst_param(struct vkd3d_shader_sm1_parser *sm1, uint32_t param,
|
|
- struct vkd3d_shader_src_param *rel_addr, struct vkd3d_shader_dst_param *dst)
|
|
+static void d3dbc_parse_dst_operand(struct vkd3d_shader_sm1_parser *d3dbc,
|
|
+ uint32_t param, struct vsir_src_operand *rel_addr, struct vsir_dst_operand *dst)
|
|
{
|
|
- d3dbc_parse_register(sm1, &dst->reg, param, rel_addr);
|
|
+ d3dbc_parse_register(d3dbc, &dst->reg, param, rel_addr);
|
|
dst->modifiers = (param & VKD3D_SM1_DST_MODIFIER_MASK) >> VKD3D_SM1_DST_MODIFIER_SHIFT;
|
|
dst->shift = (param & VKD3D_SM1_DSTSHIFT_MASK) >> VKD3D_SM1_DSTSHIFT_SHIFT;
|
|
|
|
@@ -1039,51 +1039,51 @@ static void shader_sm1_skip_opcode(const struct vkd3d_shader_sm1_parser *sm1, co
|
|
*ptr += (opcode_info->dst_count + opcode_info->src_count);
|
|
}
|
|
|
|
-static void shader_sm1_read_src_param(struct vkd3d_shader_sm1_parser *sm1, const uint32_t **ptr,
|
|
- struct vkd3d_shader_src_param *src_param)
|
|
+static void d3dbc_read_src_operand(struct vkd3d_shader_sm1_parser *d3dbc,
|
|
+ const uint32_t **ptr, struct vsir_src_operand *src)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_rel_addr = NULL;
|
|
+ struct vsir_src_operand *src_rel_addr = NULL;
|
|
uint32_t token, addr_token;
|
|
|
|
- shader_sm1_read_param(sm1, ptr, &token, &addr_token);
|
|
+ shader_sm1_read_param(d3dbc, ptr, &token, &addr_token);
|
|
if (has_relative_address(token))
|
|
{
|
|
- if (!(src_rel_addr = vsir_program_get_src_params(sm1->program, 1)))
|
|
+ if (!(src_rel_addr = vsir_program_get_src_operands(d3dbc->program, 1)))
|
|
{
|
|
- vkd3d_shader_parser_error(&sm1->p, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY,
|
|
+ vkd3d_shader_parser_error(&d3dbc->p, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY,
|
|
"Out of memory.");
|
|
- sm1->abort = true;
|
|
+ d3dbc->abort = true;
|
|
return;
|
|
}
|
|
- shader_sm1_parse_src_param(sm1, addr_token, NULL, src_rel_addr);
|
|
+ d3dbc_parse_src_operand(d3dbc, addr_token, NULL, src_rel_addr);
|
|
}
|
|
- shader_sm1_parse_src_param(sm1, token, src_rel_addr, src_param);
|
|
+ d3dbc_parse_src_operand(d3dbc, token, src_rel_addr, src);
|
|
}
|
|
|
|
-static void shader_sm1_read_dst_param(struct vkd3d_shader_sm1_parser *sm1, const uint32_t **ptr,
|
|
- struct vkd3d_shader_dst_param *dst_param)
|
|
+static void d3dbc_read_dst_operand(struct vkd3d_shader_sm1_parser *d3dbc,
|
|
+ const uint32_t **ptr, struct vsir_dst_operand *dst)
|
|
{
|
|
- struct vkd3d_shader_src_param *dst_rel_addr = NULL;
|
|
+ struct vsir_src_operand *dst_rel_addr = NULL;
|
|
uint32_t token, addr_token;
|
|
|
|
- shader_sm1_read_param(sm1, ptr, &token, &addr_token);
|
|
+ shader_sm1_read_param(d3dbc, ptr, &token, &addr_token);
|
|
if (has_relative_address(token))
|
|
{
|
|
- if (!(dst_rel_addr = vsir_program_get_src_params(sm1->program, 1)))
|
|
+ if (!(dst_rel_addr = vsir_program_get_src_operands(d3dbc->program, 1)))
|
|
{
|
|
- vkd3d_shader_parser_error(&sm1->p, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY,
|
|
+ vkd3d_shader_parser_error(&d3dbc->p, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY,
|
|
"Out of memory.");
|
|
- sm1->abort = true;
|
|
+ d3dbc->abort = true;
|
|
return;
|
|
}
|
|
- shader_sm1_parse_src_param(sm1, addr_token, NULL, dst_rel_addr);
|
|
+ d3dbc_parse_src_operand(d3dbc, addr_token, NULL, dst_rel_addr);
|
|
}
|
|
- shader_sm1_parse_dst_param(sm1, token, dst_rel_addr, dst_param);
|
|
+ d3dbc_parse_dst_operand(d3dbc, token, dst_rel_addr, dst);
|
|
|
|
- if (dst_param->reg.type == VKD3DSPR_RASTOUT && dst_param->reg.idx[0].offset == VSIR_RASTOUT_POINT_SIZE)
|
|
- sm1->program->has_point_size = true;
|
|
- if (dst_param->reg.type == VKD3DSPR_RASTOUT && dst_param->reg.idx[0].offset == VSIR_RASTOUT_FOG)
|
|
- sm1->program->has_fog = true;
|
|
+ if (dst->reg.type == VKD3DSPR_RASTOUT && dst->reg.idx[0].offset == VSIR_RASTOUT_POINT_SIZE)
|
|
+ d3dbc->program->has_point_size = true;
|
|
+ if (dst->reg.type == VKD3DSPR_RASTOUT && dst->reg.idx[0].offset == VSIR_RASTOUT_FOG)
|
|
+ d3dbc->program->has_fog = true;
|
|
}
|
|
|
|
static void shader_sm1_read_semantic(struct vkd3d_shader_sm1_parser *sm1,
|
|
@@ -1121,7 +1121,7 @@ static void shader_sm1_read_semantic(struct vkd3d_shader_sm1_parser *sm1,
semantic->resource_data_type[1] = VSIR_DATA_F32;
semantic->resource_data_type[2] = VSIR_DATA_F32;
semantic->resource_data_type[3] = VSIR_DATA_F32;
- shader_sm1_parse_dst_param(sm1, dst_token, NULL, &semantic->resource.reg);
+ d3dbc_parse_dst_operand(sm1, dst_token, NULL, &semantic->resource.reg);
range = &semantic->resource.range;
range->space = 0;
range->first = range->last = semantic->resource.reg.reg.idx[0].offset;
@@ -1135,7 +1135,7 @@ static void shader_sm1_read_semantic(struct vkd3d_shader_sm1_parser *sm1,
}

static void shader_sm1_read_immconst(struct vkd3d_shader_sm1_parser *sm1, const uint32_t **ptr,
- struct vkd3d_shader_src_param *src_param, enum vsir_dimension dimension, enum vsir_data_type data_type)
+ struct vsir_src_operand *src, enum vsir_dimension dimension, enum vsir_data_type data_type)
{
unsigned int count = dimension == VSIR_DIMENSION_VEC4 ? 4 : 1;

@@ -1148,21 +1148,10 @@ static void shader_sm1_read_immconst(struct vkd3d_shader_sm1_parser *sm1, const
return;
}

- src_param->reg.type = VKD3DSPR_IMMCONST;
- src_param->reg.precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT;
- src_param->reg.non_uniform = false;
- src_param->reg.data_type = data_type;
- src_param->reg.idx[0].offset = ~0u;
- src_param->reg.idx[0].rel_addr = NULL;
- src_param->reg.idx[1].offset = ~0u;
- src_param->reg.idx[1].rel_addr = NULL;
- src_param->reg.idx[2].offset = ~0u;
- src_param->reg.idx[2].rel_addr = NULL;
- src_param->reg.idx_count = 0;
- src_param->reg.dimension = dimension;
- memcpy(src_param->reg.u.immconst_u32, *ptr, count * sizeof(uint32_t));
- src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
- src_param->modifiers = 0;
+ vsir_src_operand_init(src, VKD3DSPR_IMMCONST, data_type, 0);
+ src->reg.dimension = dimension;
+ memcpy(src->reg.u.immconst_u32, *ptr, count * sizeof(uint32_t));
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;

*ptr += count;
}
@@ -1283,12 +1272,12 @@ static unsigned int mask_from_swizzle(uint32_t swizzle)

static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, struct vkd3d_shader_instruction *ins)
{
- struct vkd3d_shader_src_param *src_params, *predicate;
const struct vkd3d_sm1_opcode_info *opcode_info;
struct vsir_program *program = sm1->program;
unsigned int vsir_dst_count, vsir_src_count;
- struct vkd3d_shader_dst_param *dst_param;
+ struct vsir_src_operand *src, *predicate;
const uint32_t **ptr = &sm1->ptr;
+ struct vsir_dst_operand *dst;
uint32_t opcode_token;
const uint32_t *p;
bool predicated;
@@ -1330,12 +1319,12 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
ins->raw = false;
ins->structured = false;
predicated = !!(opcode_token & VKD3D_SM1_INSTRUCTION_PREDICATED);
- ins->predicate = predicate = predicated ? vsir_program_get_src_params(program, 1) : NULL;
+ ins->predicate = predicate = predicated ? vsir_program_get_src_operands(program, 1) : NULL;
ins->dst_count = vsir_dst_count;
- ins->dst = dst_param = vsir_program_get_dst_params(program, ins->dst_count);
+ ins->dst = dst = vsir_program_get_dst_operands(program, ins->dst_count);
ins->src_count = vsir_src_count;
- ins->src = src_params = vsir_program_get_src_params(program, ins->src_count);
- if ((!predicate && predicated) || (!src_params && ins->src_count) || (!dst_param && ins->dst_count))
+ ins->src = src = vsir_program_get_src_operands(program, ins->src_count);
+ if ((!predicate && predicated) || (!src && ins->src_count) || (!dst && ins->dst_count))
{
vkd3d_shader_parser_error(&sm1->p, VKD3D_SHADER_ERROR_D3DBC_OUT_OF_MEMORY, "Out of memory.");
goto fail;
@@ -1365,21 +1354,21 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
|
|
}
|
|
else if (ins->opcode == VSIR_OP_DEF)
|
|
{
|
|
- shader_sm1_read_dst_param(sm1, &p, dst_param);
|
|
- shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_VEC4, VSIR_DATA_F32);
|
|
- shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true);
|
|
+ d3dbc_read_dst_operand(sm1, &p, dst);
|
|
+ shader_sm1_read_immconst(sm1, &p, &src[0], VSIR_DIMENSION_VEC4, VSIR_DATA_F32);
|
|
+ shader_sm1_scan_register(sm1, &dst->reg, dst->write_mask, true);
|
|
}
|
|
else if (ins->opcode == VSIR_OP_DEFB)
|
|
{
|
|
- shader_sm1_read_dst_param(sm1, &p, dst_param);
|
|
- shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_SCALAR, VSIR_DATA_U32);
|
|
- shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true);
|
|
+ d3dbc_read_dst_operand(sm1, &p, dst);
|
|
+ shader_sm1_read_immconst(sm1, &p, &src[0], VSIR_DIMENSION_SCALAR, VSIR_DATA_U32);
|
|
+ shader_sm1_scan_register(sm1, &dst->reg, dst->write_mask, true);
|
|
}
|
|
else if (ins->opcode == VSIR_OP_DEFI)
|
|
{
|
|
- shader_sm1_read_dst_param(sm1, &p, dst_param);
|
|
- shader_sm1_read_immconst(sm1, &p, &src_params[0], VSIR_DIMENSION_VEC4, VSIR_DATA_I32);
|
|
- shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, true);
|
|
+ d3dbc_read_dst_operand(sm1, &p, dst);
|
|
+ shader_sm1_read_immconst(sm1, &p, &src[0], VSIR_DIMENSION_VEC4, VSIR_DATA_I32);
|
|
+ shader_sm1_scan_register(sm1, &dst->reg, dst->write_mask, true);
|
|
}
|
|
else if (ins->opcode == VSIR_OP_TEXKILL)
|
|
{
|
|
@@ -1387,37 +1376,37 @@ static void shader_sm1_read_instruction(struct vkd3d_shader_sm1_parser *sm1, str
|
|
* semantically a source. Since we have multiple passes which operate
|
|
* generically on sources or destinations, normalize that. */
|
|
const struct vkd3d_shader_register *reg;
|
|
- struct vkd3d_shader_dst_param tmp_dst;
|
|
+ struct vsir_dst_operand tmp_dst;
|
|
|
|
reg = &tmp_dst.reg;
|
|
- shader_sm1_read_dst_param(sm1, &p, &tmp_dst);
|
|
+ d3dbc_read_dst_operand(sm1, &p, &tmp_dst);
|
|
shader_sm1_scan_register(sm1, reg, tmp_dst.write_mask, false);
|
|
|
|
- vsir_src_param_init(&src_params[0], reg->type, reg->data_type, reg->idx_count);
|
|
- src_params[0].reg = *reg;
|
|
- src_params[0].swizzle = vsir_swizzle_from_writemask(tmp_dst.write_mask);
|
|
+ vsir_src_operand_init(&src[0], reg->type, reg->data_type, reg->idx_count);
|
|
+ src[0].reg = *reg;
|
|
+ src[0].swizzle = vsir_swizzle_from_writemask(tmp_dst.write_mask);
|
|
|
|
if (ins->predicate)
|
|
- shader_sm1_read_src_param(sm1, &p, predicate);
|
|
+ d3dbc_read_src_operand(sm1, &p, predicate);
|
|
}
|
|
else
|
|
{
|
|
/* Destination token */
|
|
if (ins->dst_count)
|
|
{
|
|
- shader_sm1_read_dst_param(sm1, &p, dst_param);
|
|
- shader_sm1_scan_register(sm1, &dst_param->reg, dst_param->write_mask, false);
|
|
+ d3dbc_read_dst_operand(sm1, &p, dst);
|
|
+ shader_sm1_scan_register(sm1, &dst->reg, dst->write_mask, false);
|
|
}
|
|
|
|
/* Predication token */
|
|
if (ins->predicate)
|
|
- shader_sm1_read_src_param(sm1, &p, predicate);
|
|
+ d3dbc_read_src_operand(sm1, &p, predicate);
|
|
|
|
/* Other source tokens */
|
|
for (i = 0; i < ins->src_count; ++i)
|
|
{
|
|
- shader_sm1_read_src_param(sm1, &p, &src_params[i]);
|
|
- shader_sm1_scan_register(sm1, &src_params[i].reg, mask_from_swizzle(src_params[i].swizzle), false);
|
|
+ d3dbc_read_src_operand(sm1, &p, &src[i]);
|
|
+ shader_sm1_scan_register(sm1, &src[i].reg, mask_from_swizzle(src[i].swizzle), false);
|
|
}
|
|
}
|
|
|
|
@@ -1522,6 +1511,8 @@ static enum vkd3d_result shader_sm1_init(struct vkd3d_shader_sm1_parser *sm1, st
|
|
code_size != ~(size_t)0 ? token_count / 4u + 4 : 16, VSIR_CF_STRUCTURED, normalisation_level))
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
+ program->f32_denorm_mode = VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+
|
|
vkd3d_shader_parser_init(&sm1->p, message_context, compile_info->source_name);
|
|
sm1->program = program;
|
|
sm1->ptr = sm1->start;
|
|
@@ -1595,7 +1586,7 @@ int d3dbc_parse(const struct vkd3d_shader_compile_info *compile_info, uint64_t c
|
|
}
|
|
}
|
|
|
|
- program->has_descriptor_info = true;
|
|
+ program->normalisation_flags.has_descriptor_info = true;
|
|
|
|
if (TRACE_ON())
|
|
vsir_program_trace(program);
|
|
@@ -1863,8 +1854,8 @@ static uint32_t swizzle_from_vsir(uint32_t swizzle)
|
|
|
|
static bool is_inconsequential_instr(const struct vkd3d_shader_instruction *ins)
|
|
{
|
|
- const struct vkd3d_shader_dst_param *dst = &ins->dst[0];
|
|
- const struct vkd3d_shader_src_param *src = &ins->src[0];
|
|
+ const struct vsir_src_operand *src = &ins->src[0];
|
|
+ const struct vsir_dst_operand *dst = &ins->dst[0];
|
|
unsigned int i;
|
|
|
|
if (ins->opcode != VSIR_OP_MOV)
|
|
@@ -1887,7 +1878,7 @@ static bool is_inconsequential_instr(const struct vkd3d_shader_instruction *ins)
|
|
return true;
|
|
}
|
|
|
|
-static void write_sm1_dst_register(struct vkd3d_bytecode_buffer *buffer, const struct vkd3d_shader_dst_param *reg)
|
|
+static void write_sm1_dst_register(struct vkd3d_bytecode_buffer *buffer, const struct vsir_dst_operand *reg)
|
|
{
|
|
uint32_t offset = reg->reg.idx_count ? reg->reg.idx[0].offset : 0;
|
|
|
|
@@ -1899,7 +1890,7 @@ static void write_sm1_dst_register(struct vkd3d_bytecode_buffer *buffer, const s
|
|
| (offset & VKD3D_SM1_REGISTER_NUMBER_MASK));
|
|
}
|
|
|
|
-static void write_sm1_src_register(struct vkd3d_bytecode_buffer *buffer, const struct vkd3d_shader_src_param *reg)
|
|
+static void write_sm1_src_register(struct vkd3d_bytecode_buffer *buffer, const struct vsir_src_operand *reg)
|
|
{
|
|
uint32_t address_mode = VKD3D_SM1_ADDRESS_MODE_ABSOLUTE, offset = 0;
|
|
|
|
@@ -1922,8 +1913,8 @@ static void d3dbc_write_instruction(struct d3dbc_compiler *d3dbc, const struct v
|
|
{
|
|
const struct vkd3d_shader_version *version = &d3dbc->program->shader_version;
|
|
struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer;
|
|
- const struct vkd3d_shader_src_param *src;
|
|
const struct vkd3d_sm1_opcode_info *info;
|
|
+ const struct vsir_src_operand *src;
|
|
size_t size, token_position;
|
|
unsigned int i;
|
|
uint32_t token;
|
|
@@ -1971,12 +1962,12 @@ static void d3dbc_write_texkill(struct d3dbc_compiler *d3dbc, const struct vkd3d
|
|
{
|
|
const struct vkd3d_shader_register *reg = &ins->src[0].reg;
|
|
struct vkd3d_shader_instruction tmp;
|
|
- struct vkd3d_shader_dst_param dst;
|
|
+ struct vsir_dst_operand dst;
|
|
|
|
/* TEXKILL, uniquely, encodes its argument as a destination, when it is
|
|
* semantically a source. We store it as a source in vsir, so convert it. */
|
|
|
|
- vsir_dst_param_init(&dst, reg->type, reg->data_type, reg->idx_count);
|
|
+ vsir_dst_operand_init(&dst, reg->type, reg->data_type, reg->idx_count);
|
|
dst.reg = *reg;
|
|
dst.write_mask = mask_from_swizzle(ins->src[0].swizzle);
|
|
|
|
@@ -1994,7 +1985,7 @@ static void d3dbc_write_vsir_def(struct d3dbc_compiler *d3dbc, const struct vkd3
|
|
struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer;
|
|
uint32_t token;
|
|
|
|
- const struct vkd3d_shader_dst_param reg =
|
|
+ const struct vsir_dst_operand reg =
|
|
{
|
|
.reg.type = VKD3DSPR_CONST,
|
|
.write_mask = VKD3DSP_WRITEMASK_ALL,
|
|
@@ -2017,7 +2008,7 @@ static void d3dbc_write_vsir_sampler_dcl(struct d3dbc_compiler *d3dbc,
|
|
{
|
|
const struct vkd3d_shader_version *version = &d3dbc->program->shader_version;
|
|
struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer;
|
|
- struct vkd3d_shader_dst_param reg = {0};
|
|
+ struct vsir_dst_operand reg = {0};
|
|
uint32_t token;
|
|
|
|
token = VKD3D_SM1_OP_DCL;
|
|
@@ -2156,7 +2147,7 @@ static void d3dbc_write_semantic_dcl(struct d3dbc_compiler *d3dbc,
|
|
{
|
|
const struct vkd3d_shader_version *version = &d3dbc->program->shader_version;
|
|
struct vkd3d_bytecode_buffer *buffer = &d3dbc->buffer;
|
|
- struct vkd3d_shader_dst_param reg = {0};
|
|
+ struct vsir_dst_operand reg = {0};
|
|
enum vkd3d_decl_usage usage;
|
|
uint32_t token, usage_idx;
|
|
bool ret;
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxbc.c b/libs/vkd3d/libs/vkd3d-shader/dxbc.c
index 45a45c3ad4a..f1533fbcd54 100644
--- a/libs/vkd3d/libs/vkd3d-shader/dxbc.c
+++ b/libs/vkd3d/libs/vkd3d-shader/dxbc.c
@@ -340,11 +340,7 @@ int vkd3d_shader_parse_dxbc(const struct vkd3d_shader_code *dxbc,
ret = parse_dxbc(dxbc, &message_context, NULL, flags, desc);

vkd3d_shader_message_context_trace_messages(&message_context);
- if (!vkd3d_shader_message_context_copy_messages(&message_context, messages) && ret >= 0)
- {
- vkd3d_shader_free_dxbc(desc);
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
- }
+ vkd3d_shader_string_from_message_context(messages, &message_context);
vkd3d_shader_message_context_cleanup(&message_context);

if (ret < 0)
@@ -1106,9 +1102,7 @@ int vkd3d_shader_parse_root_signature(const struct vkd3d_shader_code *dxbc,

ret = for_each_dxbc_section(dxbc, &message_context, NULL, rts0_handler, root_signature);
vkd3d_shader_message_context_trace_messages(&message_context);
- if (!vkd3d_shader_message_context_copy_messages(&message_context, messages))
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
-
+ vkd3d_shader_string_from_message_context(messages, &message_context);
vkd3d_shader_message_context_cleanup(&message_context);
if (ret < 0)
vkd3d_shader_free_root_signature(root_signature);
@@ -1558,8 +1552,7 @@ int vkd3d_shader_serialize_root_signature(const struct vkd3d_shader_versioned_ro

done:
vkd3d_shader_message_context_trace_messages(&context.message_context);
- if (!vkd3d_shader_message_context_copy_messages(&context.message_context, messages))
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
+ vkd3d_shader_string_from_message_context(messages, &context.message_context);
vkd3d_shader_message_context_cleanup(&context.message_context);
return ret;
}
diff --git a/libs/vkd3d/libs/vkd3d-shader/dxil.c b/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
index 9f25ae8334b..7a056775a16 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/dxil.c
|
|
@@ -176,6 +176,16 @@ enum bitcode_value_symtab_code
|
|
VST_CODE_BBENTRY = 2,
|
|
};
|
|
|
|
+enum bitcode_paramattr_code
|
|
+{
|
|
+ PARAMATTR_CODE_ENTRY = 2,
|
|
+};
|
|
+
|
|
+enum bitcode_paramattr_group_code
|
|
+{
|
|
+ PARAMATTR_GRP_CODE_ENTRY = 3,
|
|
+};
|
|
+
|
|
enum bitcode_linkage
|
|
{
|
|
LINKAGE_EXTERNAL = 0,
|
|
@@ -596,10 +606,20 @@ struct sm6_pointer_info
|
|
enum bitcode_address_space addr_space;
|
|
};
|
|
|
|
+enum dxil_well_known_structs
|
|
+{
|
|
+ WELL_KNOWN_NONE = 0,
|
|
+ WELL_KNOWN_HANDLE,
|
|
+ WELL_KNOWN_DIMENSIONS,
|
|
+ WELL_KNOWN_SPLITDOUBLE,
|
|
+ WELL_KNOWN_FOURI32,
|
|
+};
|
|
+
|
|
struct sm6_struct_info
|
|
{
|
|
const char *name;
|
|
unsigned int elem_count;
|
|
+ enum dxil_well_known_structs well_known;
|
|
const struct sm6_type *elem_types[];
|
|
};
|
|
|
|
@@ -748,67 +768,15 @@ struct sm6_symbol
|
|
|
|
struct incoming_value
|
|
{
|
|
- const struct sm6_block *block;
|
|
- struct vkd3d_shader_register reg;
|
|
-};
|
|
-
|
|
-struct sm6_phi
|
|
-{
|
|
- struct sm6_value value;
|
|
- struct incoming_value *incoming;
|
|
- size_t incoming_capacity;
|
|
- size_t incoming_count;
|
|
-};
|
|
-
|
|
-enum sm6_block_terminator_type
|
|
-{
|
|
- TERMINATOR_UNCOND_BR,
|
|
- TERMINATOR_COND_BR,
|
|
- TERMINATOR_SWITCH,
|
|
- TERMINATOR_RET,
|
|
-};
|
|
-
|
|
-struct terminator_case
|
|
-{
|
|
- const struct sm6_block *block;
|
|
- uint64_t value;
|
|
- bool is_default;
|
|
-};
|
|
-
|
|
-struct sm6_block_terminator
|
|
-{
|
|
- struct vkd3d_shader_register conditional_reg;
|
|
- enum sm6_block_terminator_type type;
|
|
- const struct sm6_block *true_block;
|
|
- const struct sm6_block *false_block;
|
|
- struct terminator_case *cases;
|
|
- unsigned int case_count;
|
|
-};
|
|
-
|
|
-struct sm6_block
|
|
-{
|
|
- struct vkd3d_shader_instruction *instructions;
|
|
- size_t instruction_capacity;
|
|
- size_t instruction_count;
|
|
-
|
|
- /* A nonzero id. */
|
|
- unsigned int id;
|
|
-
|
|
- struct sm6_phi *phi;
|
|
- size_t phi_capacity;
|
|
- size_t phi_count;
|
|
-
|
|
- struct sm6_block_terminator terminator;
|
|
+ unsigned int block_idx;
|
|
+ const struct sm6_value *src;
|
|
};
|
|
|
|
struct sm6_function
|
|
{
|
|
const struct sm6_value *declaration;
|
|
-
|
|
- struct sm6_block **blocks;
|
|
- size_t block_capacity;
|
|
+ struct vkd3d_shader_instruction_array instructions;
|
|
size_t block_count;
|
|
-
|
|
size_t value_count;
|
|
};
|
|
|
|
@@ -895,6 +863,44 @@ struct sm6_descriptor_info
|
|
enum vsir_data_type reg_data_type;
|
|
};
|
|
|
|
+struct dxil_parameter_attribute
|
|
+{
|
|
+ uint64_t *groups;
|
|
+ size_t group_count;
|
|
+};
|
|
+
|
|
+enum dxil_attribute_kind
|
|
+{
|
|
+ ATTRIBUTE_WELL_KNOWN = 0,
|
|
+ ATTRIBUTE_WELL_KNOWN_WITH_INTEGER_VALUE = 1,
|
|
+ ATTRIBUTE_STRING = 3,
|
|
+ ATTRIBUTE_STRING_WITH_STRING_VALUE = 4,
|
|
+};
|
|
+
|
|
+struct dxil_attribute
|
|
+{
|
|
+ enum dxil_attribute_kind kind;
|
|
+ union
|
|
+ {
|
|
+ uint64_t well_known;
|
|
+ const char *string;
|
|
+ } key;
|
|
+ union
|
|
+ {
|
|
+ uint64_t numeric;
|
|
+ const char *string;
|
|
+ } value;
|
|
+};
|
|
+
|
|
+struct dxil_attribute_group
|
|
+{
|
|
+ unsigned int group_id;
|
|
+ unsigned int parameter_idx;
|
|
+ struct dxil_attribute *attributes;
|
|
+ size_t attribute_count;
|
|
+ size_t attribute_capacity;
|
|
+};
|
|
+
|
|
struct sm6_parser
|
|
{
|
|
const uint32_t *ptr, *start, *end;
|
|
@@ -921,11 +927,11 @@ struct sm6_parser
|
|
const char *entry_point;
|
|
const char *patch_constant_function;
|
|
|
|
- struct vkd3d_shader_dst_param *output_params;
|
|
- struct vkd3d_shader_dst_param *input_params;
|
|
- struct vkd3d_shader_dst_param *patch_constant_params;
|
|
+ struct vsir_dst_operand *output_params;
|
|
+ struct vsir_dst_operand *input_params;
|
|
+ struct vsir_dst_operand *patch_constant_params;
|
|
uint32_t io_regs_declared[VKD3D_BITMAP_SIZE(VKD3DSPR_COUNT)];
|
|
- struct vkd3d_shader_src_param *outpointid_param;
|
|
+ struct vsir_src_operand *outpointid_param;
|
|
|
|
struct sm6_function *functions;
|
|
size_t function_count;
|
|
@@ -948,6 +954,12 @@ struct sm6_parser
|
|
size_t cur_max_value;
|
|
unsigned int ssa_next_id;
|
|
|
|
+ struct dxil_parameter_attribute *parameter_attributes;
|
|
+ size_t parameter_attribute_count;
|
|
+
|
|
+ struct dxil_attribute_group *attribute_groups;
|
|
+ size_t attribute_group_count;
|
|
+
|
|
struct vkd3d_shader_parser p;
|
|
};
|
|
|
|
@@ -1630,6 +1642,50 @@ static char *dxil_record_to_string(const struct dxil_record *record, unsigned in
|
|
return str;
|
|
}
|
|
|
|
+static char *dxil_record_to_zero_terminated_string(const struct dxil_record *record,
|
|
+ unsigned int *offset, struct sm6_parser *dxil)
|
|
+{
|
|
+ size_t str_len = 0, str_capacity = 0;
|
|
+ char *str = NULL;
|
|
+ unsigned int i;
|
|
+
|
|
+ VKD3D_ASSERT(*offset < record->operand_count);
|
|
+
|
|
+ for (i = *offset; i < record->operand_count; ++i)
|
|
+ {
|
|
+ if (record->operands[i] > UCHAR_MAX)
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_STRING,
|
|
+ "Operand value %"PRIu64" is not a valid string character.", record->operands[i]);
|
|
+ vkd3d_free(str);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (!vkd3d_array_reserve((void **)&str, &str_capacity, str_len + 1, sizeof(*str)))
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
+ "Out of memory allocating a string of length %zu.", str_len + 1);
|
|
+ vkd3d_free(str);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (!(str[str_len++] = record->operands[i]))
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (i == record->operand_count)
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_STRING,
|
|
+ "String is not zero-terminated.");
|
|
+ vkd3d_free(str);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ *offset = i + 1;
|
|
+
|
|
+ return str;
|
|
+}
|
|
+
|
|
static bool dxil_record_validate_operand_min_count(const struct dxil_record *record, unsigned int min_count,
|
|
struct sm6_parser *sm6)
|
|
{
|
|
@@ -1660,6 +1716,8 @@ static bool dxil_record_validate_operand_count(const struct dxil_record *record,
|
|
return dxil_record_validate_operand_min_count(record, min_count, sm6);
|
|
}
|
|
|
|
+static void dxil_recognise_well_known_struct(struct sm6_parser *dxil, struct sm6_struct_info *info);
|
|
+
|
|
static enum vkd3d_result sm6_parser_type_table_init(struct sm6_parser *sm6)
|
|
{
|
|
const struct dxil_record *record;
|
|
@@ -1875,11 +1933,14 @@ static enum vkd3d_result sm6_parser_type_table_init(struct sm6_parser *sm6)
|
|
break;
|
|
}
|
|
|
|
- if (!strcmp(struct_name, "dx.types.Handle"))
|
|
- sm6->handle_type = type;
|
|
-
|
|
type->u.struc->name = struct_name;
|
|
struct_name = NULL;
|
|
+
|
|
+ dxil_recognise_well_known_struct(sm6, type->u.struc);
|
|
+
|
|
+ if (type->u.struc->well_known == WELL_KNOWN_HANDLE)
|
|
+ sm6->handle_type = type;
|
|
+
|
|
break;
|
|
|
|
case TYPE_CODE_STRUCT_NAME:
|
|
@@ -2034,7 +2095,7 @@ static inline bool sm6_type_is_function_pointer(const struct sm6_type *type)
|
|
|
|
static inline bool sm6_type_is_handle(const struct sm6_type *type)
|
|
{
|
|
- return sm6_type_is_struct(type) && !strcmp(type->u.struc->name, "dx.types.Handle");
|
|
+ return sm6_type_is_struct(type) && type->u.struc->well_known == WELL_KNOWN_HANDLE;
|
|
}
|
|
|
|
static const struct sm6_type *sm6_type_get_pointer_to_type(const struct sm6_type *type,
|
|
@@ -2055,6 +2116,79 @@ static const struct sm6_type *sm6_type_get_pointer_to_type(const struct sm6_type
|
|
return NULL;
|
|
}
|
|
|
|
+static void dxil_recognise_well_known_struct(struct sm6_parser *dxil, struct sm6_struct_info *info)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ info->well_known = WELL_KNOWN_NONE;
|
|
+
|
|
+ if (!info->name)
|
|
+ return;
|
|
+
|
|
+ if (!strcmp(info->name, "dx.types.Dimensions"))
|
|
+ {
|
|
+ if (info->elem_count != 4)
|
|
+ goto error;
|
|
+ for (i = 0; i < 4; ++i)
|
|
+ {
|
|
+ if (!sm6_type_is_i32(info->elem_types[i]))
|
|
+ goto error;
|
|
+ }
|
|
+ info->well_known = WELL_KNOWN_DIMENSIONS;
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!strcmp(info->name, "dx.types.fouri32"))
|
|
+ {
|
|
+ if (info->elem_count != 4)
|
|
+ goto error;
|
|
+ for (i = 0; i < 4; ++i)
|
|
+ {
|
|
+ if (!sm6_type_is_i32(info->elem_types[i]))
|
|
+ goto error;
|
|
+ }
|
|
+ info->well_known = WELL_KNOWN_FOURI32;
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!strcmp(info->name, "dx.types.Handle"))
|
|
+ {
|
|
+ if (info->elem_count != 1)
|
|
+ goto error;
|
|
+ if (!sm6_type_is_pointer(info->elem_types[0]))
|
|
+ goto error;
|
|
+ if (!sm6_type_is_i8(info->elem_types[0]->u.pointer.type))
|
|
+ goto error;
|
|
+ if (info->elem_types[0]->u.pointer.addr_space != ADDRESS_SPACE_DEFAULT)
|
|
+ goto error;
|
|
+ info->well_known = WELL_KNOWN_HANDLE;
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ if (!strcmp(info->name, "dx.types.splitdouble"))
|
|
+ {
|
|
+ if (info->elem_count != 2)
|
|
+ goto error;
|
|
+ for (i = 0; i < 2; ++i)
|
|
+ {
|
|
+ if (!sm6_type_is_i32(info->elem_types[i]))
|
|
+ goto error;
|
|
+ }
|
|
+ info->well_known = WELL_KNOWN_SPLITDOUBLE;
|
|
+
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ return;
|
|
+
|
|
+error:
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
+ "Structure type `%s' has unexpected fields.", info->name);
|
|
+}
|
|
+
|
|
static const struct sm6_type *sm6_type_get_cmpxchg_result_struct(struct sm6_parser *sm6)
|
|
{
|
|
const struct sm6_type *type;
|
|
@@ -2444,12 +2578,12 @@ static void instruction_init_with_resource(struct vkd3d_shader_instruction *ins,
|
|
ins->structured = resource->u.handle.d->kind == RESOURCE_KIND_STRUCTUREDBUFFER;
|
|
}
|
|
|
|
-static struct vkd3d_shader_src_param *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
+static struct vsir_src_operand *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
unsigned int count, struct sm6_parser *sm6)
|
|
{
|
|
- struct vkd3d_shader_src_param *params;
|
|
+ struct vsir_src_operand *params;
|
|
|
|
- if (!(params = vsir_program_get_src_params(sm6->program, count)))
|
|
+ if (!(params = vsir_program_get_src_operands(sm6->program, count)))
|
|
{
|
|
ERR("Failed to allocate src params.\n");
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
@@ -2461,21 +2595,21 @@ static struct vkd3d_shader_src_param *instruction_src_params_alloc(struct vkd3d_
|
|
return params;
|
|
}
|
|
|
|
-static struct vkd3d_shader_dst_param *instruction_dst_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
+static struct vsir_dst_operand *instruction_dst_params_alloc(struct vkd3d_shader_instruction *ins,
|
|
unsigned int count, struct sm6_parser *sm6)
|
|
{
|
|
- struct vkd3d_shader_dst_param *params;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
- if (!(params = vsir_program_get_dst_params(sm6->program, count)))
|
|
+ if (!(dst = vsir_program_get_dst_operands(sm6->program, count)))
|
|
{
|
|
- ERR("Failed to allocate dst params.\n");
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
- "Out of memory allocating instruction dst parameters.");
|
|
+ "Out of memory allocating instruction destination operands.");
|
|
return NULL;
|
|
}
|
|
- ins->dst = params;
|
|
+ ins->dst = dst;
|
|
ins->dst_count = count;
|
|
- return params;
|
|
+
|
|
+ return dst;
|
|
}
|
|
|
|
static void register_init_with_id(struct vkd3d_shader_register *reg,
|
|
@@ -2700,41 +2834,41 @@ static void register_make_constant_uint(struct vkd3d_shader_register *reg, unsig
|
|
reg->u.immconst_u32[0] = value;
|
|
}
|
|
|
|
-static void dst_param_init(struct vkd3d_shader_dst_param *param)
|
|
+static void dst_param_init(struct vsir_dst_operand *param)
|
|
{
|
|
param->write_mask = VKD3DSP_WRITEMASK_0;
|
|
param->modifiers = 0;
|
|
param->shift = 0;
|
|
}
|
|
|
|
-static void dst_param_init_with_mask(struct vkd3d_shader_dst_param *param, unsigned int mask)
|
|
+static void dst_param_init_with_mask(struct vsir_dst_operand *param, unsigned int mask)
|
|
{
|
|
param->write_mask = mask;
|
|
param->modifiers = 0;
|
|
param->shift = 0;
|
|
}
|
|
|
|
-static inline void dst_param_init_scalar(struct vkd3d_shader_dst_param *param, unsigned int component_idx)
|
|
+static inline void dst_param_init_scalar(struct vsir_dst_operand *param, unsigned int component_idx)
|
|
{
|
|
param->write_mask = 1u << component_idx;
|
|
param->modifiers = 0;
|
|
param->shift = 0;
|
|
}
|
|
|
|
-static void dst_param_init_vector(struct vkd3d_shader_dst_param *param, unsigned int component_count)
|
|
+static void dst_param_init_vector(struct vsir_dst_operand *param, unsigned int component_count)
|
|
{
|
|
param->write_mask = (1u << component_count) - 1;
|
|
param->modifiers = 0;
|
|
param->shift = 0;
|
|
}
|
|
|
|
-static inline void src_param_init(struct vkd3d_shader_src_param *param)
|
|
+static inline void src_param_init(struct vsir_src_operand *param)
|
|
{
|
|
param->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
param->modifiers = VKD3DSPSM_NONE;
|
|
}
|
|
|
|
-static void src_param_init_scalar(struct vkd3d_shader_src_param *param, unsigned int component_idx)
|
|
+static void src_param_init_scalar(struct vsir_src_operand *param, unsigned int component_idx)
|
|
{
|
|
param->swizzle = vkd3d_shader_create_swizzle(component_idx, component_idx, component_idx, component_idx);
|
|
if (data_type_is_64_bit(param->reg.data_type))
|
|
@@ -2742,20 +2876,20 @@ static void src_param_init_scalar(struct vkd3d_shader_src_param *param, unsigned
|
|
param->modifiers = VKD3DSPSM_NONE;
|
|
}
|
|
|
|
-static void src_param_init_vector(struct vkd3d_shader_src_param *param, unsigned int component_count)
|
|
+static void src_param_init_vector(struct vsir_src_operand *param, unsigned int component_count)
|
|
{
|
|
param->swizzle = VKD3D_SHADER_NO_SWIZZLE & ((1ull << VKD3D_SHADER_SWIZZLE_SHIFT(component_count)) - 1);
|
|
param->modifiers = VKD3DSPSM_NONE;
|
|
}
|
|
|
|
-static void src_param_init_from_value(struct vkd3d_shader_src_param *param,
|
|
+static void src_param_init_from_value(struct vsir_src_operand *param,
|
|
const struct sm6_value *src, uint32_t type_flags, struct sm6_parser *dxil)
|
|
{
|
|
src_param_init(param);
|
|
vsir_register_from_dxil_value(¶m->reg, src, type_flags, dxil);
|
|
}
|
|
|
|
-static void src_param_init_vector_from_reg(struct vkd3d_shader_src_param *param,
|
|
+static void src_param_init_vector_from_reg(struct vsir_src_operand *param,
|
|
const struct vkd3d_shader_register *reg)
|
|
{
|
|
param->swizzle = (reg->dimension == VSIR_DIMENSION_VEC4) ? VKD3D_SHADER_NO_SWIZZLE : VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
@@ -2763,15 +2897,17 @@ static void src_param_init_vector_from_reg(struct vkd3d_shader_src_param *param,
|
|
param->reg = *reg;
|
|
}
|
|
|
|
-static void src_param_make_constant_uint(struct vkd3d_shader_src_param *param, unsigned int value)
|
|
+static void src_param_make_constant_uint(struct vsir_src_operand *param, unsigned int value)
|
|
{
|
|
src_param_init(param);
|
|
register_make_constant_uint(¶m->reg, value);
|
|
}
|
|
|
|
-static void register_index_address_init(struct vkd3d_shader_register_index *idx, const struct sm6_value *address,
|
|
- struct sm6_parser *sm6)
|
|
+static void register_index_address_init(struct vkd3d_shader_register_index *idx,
|
|
+ const struct sm6_value *address, struct sm6_parser *sm6)
|
|
{
|
|
+ struct vsir_src_operand *rel_addr;
|
|
+
|
|
if (address && sm6_value_is_constant(address))
|
|
{
|
|
idx->offset = sm6_value_get_constant_uint(address, sm6);
|
|
@@ -2784,8 +2920,7 @@ static void register_index_address_init(struct vkd3d_shader_register_index *idx,
|
|
}
|
|
else
|
|
{
|
|
- struct vkd3d_shader_src_param *rel_addr = vsir_program_get_src_params(sm6->program, 1);
|
|
- if (rel_addr)
|
|
+ if ((rel_addr = vsir_program_get_src_operands(sm6->program, 1)))
|
|
src_param_init_from_value(rel_addr, address, 0, sm6);
|
|
idx->offset = 0;
|
|
idx->rel_addr = rel_addr;
|
|
@@ -2803,7 +2938,7 @@ static void sm6_register_from_handle(struct sm6_parser *sm6,
|
|
}
|
|
|
|
static void src_param_init_vector_from_handle(struct sm6_parser *sm6,
|
|
- struct vkd3d_shader_src_param *param, const struct sm6_handle_data *handle)
|
|
+ struct vsir_src_operand *param, const struct sm6_handle_data *handle)
|
|
{
|
|
struct vkd3d_shader_register reg;
|
|
|
|
@@ -2815,7 +2950,7 @@ static bool instruction_dst_param_init_ssa_scalar(struct vkd3d_shader_instructio
|
|
uint32_t type_flags, struct sm6_parser *dxil)
|
|
{
|
|
struct sm6_value *dst = sm6_parser_get_current_value(dxil);
|
|
- struct vkd3d_shader_dst_param *param;
|
|
+ struct vsir_dst_operand *param;
|
|
|
|
if (!(param = instruction_dst_params_alloc(ins, 1, dxil)))
|
|
return false;
|
|
@@ -2827,28 +2962,33 @@ static bool instruction_dst_param_init_ssa_scalar(struct vkd3d_shader_instructio
|
|
return true;
|
|
}
|
|
|
|
-static void instruction_dst_param_init_ssa_vector(struct vkd3d_shader_instruction *ins,
|
|
- unsigned int component_count, struct sm6_parser *sm6)
|
|
+static bool instruction_dst_param_init_ssa_vector(struct vkd3d_shader_instruction *ins,
|
|
+ unsigned int component_count, struct sm6_parser *dxil)
|
|
{
|
|
- struct vkd3d_shader_dst_param *param = instruction_dst_params_alloc(ins, 1, sm6);
|
|
- struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
+ struct sm6_value *dxil_dst = sm6_parser_get_current_value(dxil);
|
|
+ struct vsir_dst_operand *vsir_dst;
|
|
|
|
- dst_param_init_vector(param, component_count);
|
|
- sm6_parser_init_ssa_value(sm6, dst);
|
|
- vsir_register_from_dxil_value(¶m->reg, dst, 0, sm6);
|
|
+ if (!(vsir_dst = instruction_dst_params_alloc(ins, 1, dxil)))
|
|
+ return false;
|
|
+
|
|
+ dst_param_init_vector(vsir_dst, component_count);
|
|
+ sm6_parser_init_ssa_value(dxil, dxil_dst);
|
|
+ vsir_register_from_dxil_value(&vsir_dst->reg, dxil_dst, 0, dxil);
|
|
+
|
|
+ return true;
|
|
}
|
|
|
|
static bool instruction_dst_param_init_uint_temp_vector(struct vkd3d_shader_instruction *ins, struct sm6_parser *sm6)
|
|
{
|
|
- struct vkd3d_shader_dst_param *param;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
- if (!(param = instruction_dst_params_alloc(ins, 1, sm6)))
|
|
+ if (!(dst = instruction_dst_params_alloc(ins, 1, sm6)))
|
|
return false;
|
|
|
|
- vsir_dst_param_init(param, VKD3DSPR_TEMP, VSIR_DATA_U32, 1);
|
|
- param->write_mask = VKD3DSP_WRITEMASK_ALL;
|
|
- param->reg.idx[0].offset = 0;
|
|
- param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_TEMP, VSIR_DATA_U32, 1);
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_ALL;
|
|
+ dst->reg.idx[0].offset = 0;
|
|
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
|
|
return true;
|
|
}
|
|
@@ -2890,27 +3030,24 @@ static size_t sm6_parser_compute_max_value_count(struct sm6_parser *sm6,
|
|
return value_count;
|
|
}
|
|
|
|
-static size_t sm6_parser_get_value_index(struct sm6_parser *sm6, uint64_t idx)
|
|
+static size_t sm6_parser_get_value_index(struct sm6_parser *dxil, uint32_t idx)
|
|
{
|
|
size_t i;
|
|
|
|
- /* The value relative index is 32 bits. */
|
|
- if (idx > UINT32_MAX)
|
|
- WARN("Ignoring upper 32 bits of relative index.\n");
|
|
- i = (uint32_t)sm6->value_count - (uint32_t)idx;
|
|
+ i = (uint32_t)dxil->value_count - idx;
|
|
|
|
- /* This may underflow to produce a forward reference, but it must not exceed the final value count. */
|
|
- if (i >= sm6->cur_max_value)
|
|
+ /* This may underflow to produce a forward reference, but it must not
|
|
+ * exceed the final value count. */
|
|
+ if (i >= dxil->cur_max_value)
|
|
{
|
|
- WARN("Invalid value index %"PRIx64" at %zu.\n", idx, sm6->value_count);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
- "Invalid value relative index %u.", (unsigned int)idx);
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
+ "Invalid value relative index %u.", idx);
|
|
return SIZE_MAX;
|
|
}
|
|
- if (i == sm6->value_count)
|
|
+
|
|
+ if (i == dxil->value_count)
|
|
{
|
|
- WARN("Invalid value self-reference at %zu.\n", sm6->value_count);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND, "Invalid value self-reference.");
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND, "Invalid value self-reference.");
|
|
return SIZE_MAX;
|
|
}
|
|
|
|
@@ -3084,10 +3221,34 @@ static struct sm6_value *sm6_parser_get_value_safe(struct sm6_parser *sm6, unsig
|
|
return NULL;
|
|
}
|
|
|
|
+static void sm6_parser_pre_init_or_validate_referenced_value(struct sm6_parser *dxil,
|
|
+ size_t operand, const struct sm6_type *fwd_type)
|
|
+{
|
|
+ struct sm6_value *value;
|
|
+
|
|
+ value = &dxil->values[operand];
|
|
+
|
|
+ /* If the value has a type, validate that it matches the expected type,
|
|
+ * otherwise it is a forward reference and we must set the type and
|
|
+ * initialise the value's register to SSA so it can be consumed by an
|
|
+ * instruction. */
|
|
+ if (value->type)
|
|
+ {
|
|
+ if (value->type != fwd_type)
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
+ "The type of a source value does not match the predefined type.");
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ value->type = fwd_type;
|
|
+ value->value_type = VALUE_TYPE_SSA;
|
|
+ value->u.ssa.id = sm6_parser_alloc_ssa_id(dxil);
|
|
+ }
|
|
+}
|
|
+
|
|
static size_t sm6_parser_get_value_idx_by_ref(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
const struct sm6_type *fwd_type, unsigned int *rec_idx)
|
|
{
|
|
- struct sm6_value *value;
|
|
unsigned int idx;
|
|
uint64_t val_ref;
|
|
size_t operand;
|
|
@@ -3097,6 +3258,11 @@ static size_t sm6_parser_get_value_idx_by_ref(struct sm6_parser *sm6, const stru
|
|
return SIZE_MAX;
|
|
val_ref = record->operands[idx++];
|
|
|
|
+ /* Normally only the lower 32 bits are set in the value relative index. */
|
|
+ if (val_ref > UINT32_MAX)
|
|
+ vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
+ "Ignoring upper 32 bits of DXIL SSA value relative index %"PRIx64".", val_ref);
|
|
+
|
|
operand = sm6_parser_get_value_index(sm6, val_ref);
|
|
if (operand == SIZE_MAX)
|
|
return SIZE_MAX;
|
|
@@ -3113,26 +3279,40 @@ static size_t sm6_parser_get_value_idx_by_ref(struct sm6_parser *sm6, const stru
|
|
*rec_idx = idx;
|
|
|
|
if (fwd_type)
|
|
+ sm6_parser_pre_init_or_validate_referenced_value(sm6, operand, fwd_type);
|
|
+
|
|
+ return operand;
|
|
+}
|
|
+
|
|
+static uint64_t decode_rotated_signed_value(uint64_t value)
|
|
+{
|
|
+ if (value != 1)
|
|
{
|
|
- value = &sm6->values[operand];
|
|
- if (value->type)
|
|
- {
|
|
- if (value->type != fwd_type)
|
|
- {
|
|
- WARN("Value already has a mismatching type.\n");
|
|
- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
- "The type of a source value does not match the predefined type.");
|
|
- }
|
|
- }
|
|
- else
|
|
- {
|
|
- value->type = fwd_type;
|
|
- value->value_type = VALUE_TYPE_SSA;
|
|
- value->u.ssa.id = sm6_parser_alloc_ssa_id(sm6);
|
|
- }
|
|
+ bool neg = value & 1;
|
|
+ value >>= 1;
|
|
+ return neg ? -value : value;
|
|
}
|
|
|
|
- return operand;
|
|
+ return value << 63;
|
|
+}
|
|
+
|
|
+static const struct sm6_value *sm6_parser_get_value_by_rotated_signed_idx(struct sm6_parser *dxil,
|
|
+ uint64_t idx, const struct sm6_type *fwd_type)
|
|
+{
|
|
+ int64_t rotated_idx;
|
|
+ size_t operand;
|
|
+
|
|
+ rotated_idx = decode_rotated_signed_value(idx);
|
|
+ if (rotated_idx > INT32_MAX || rotated_idx < INT32_MIN)
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
+ "Ignoring upper 32 bits of DXIL SSA value signed relative index %"PRIx64".", rotated_idx);
|
|
+
|
|
+ if ((operand = sm6_parser_get_value_index(dxil, rotated_idx)) == SIZE_MAX)
|
|
+ return NULL;
|
|
+
|
|
+ sm6_parser_pre_init_or_validate_referenced_value(dxil, operand, fwd_type);
|
|
+
|
|
+ return &dxil->values[operand];
|
|
}
|
|
|
|
static const struct sm6_value *sm6_parser_get_value_by_ref(struct sm6_parser *sm6,
|
|
@@ -3186,8 +3366,7 @@ static bool sm6_parser_declare_function(struct sm6_parser *sm6, const struct dxi
|
|
if (record->operands[4] > UINT_MAX)
|
|
WARN("Invalid attributes id %#"PRIx64".\n", record->operands[4]);
|
|
/* 1-based index. */
|
|
- if ((fn->u.function.attribs_id = record->operands[4]))
|
|
- TRACE("Ignoring function attributes.\n");
|
|
+ fn->u.function.attribs_id = record->operands[4];
|
|
|
|
/* These always seem to be zero. */
|
|
for (i = 5, j = 0; i < min(record->operand_count, max_count); ++i)
|
|
@@ -3206,17 +3385,6 @@ static bool sm6_parser_declare_function(struct sm6_parser *sm6, const struct dxi
|
|
return true;
|
|
}
|
|
|
|
-static inline uint64_t decode_rotated_signed_value(uint64_t value)
|
|
-{
|
|
- if (value != 1)
|
|
- {
|
|
- bool neg = value & 1;
|
|
- value >>= 1;
|
|
- return neg ? -value : value;
|
|
- }
|
|
- return value << 63;
|
|
-}
|
|
-
|
|
static struct sm6_index *sm6_get_value_index(struct sm6_parser *sm6, struct sm6_value *value)
|
|
{
|
|
switch (value->value_type)
|
|
@@ -4107,7 +4275,7 @@ static enum vkd3d_result sm6_parser_globals_init(struct sm6_parser *sm6)
|
|
return VKD3D_OK;
|
|
}
|
|
|
|
-static void dst_param_io_init(struct vkd3d_shader_dst_param *param, const struct signature_element *e,
|
|
+static void dst_param_io_init(struct vsir_dst_operand *param, const struct signature_element *e,
|
|
enum vkd3d_shader_register_type reg_type, enum vsir_dimension dimension)
|
|
{
|
|
enum vkd3d_shader_component_type component_type;
|
|
@@ -4121,7 +4289,7 @@ static void dst_param_io_init(struct vkd3d_shader_dst_param *param, const struct
|
|
param->reg.dimension = dimension;
|
|
}
|
|
|
|
-static void src_params_init_from_operands(struct vkd3d_shader_src_param *src_params,
|
|
+static void src_params_init_from_operands(struct vsir_src_operand *src_params,
|
|
const struct sm6_value **operands, unsigned int count, struct sm6_parser *sm6)
|
|
{
|
|
unsigned int i;
|
|
@@ -4164,13 +4332,13 @@ static enum vkd3d_shader_register_type register_type_from_dxil_semantic_kind(
|
|
}
|
|
|
|
static void sm6_parser_init_signature(struct sm6_parser *sm6, const struct shader_signature *s,
|
|
- bool is_input, enum vkd3d_shader_register_type reg_type, struct vkd3d_shader_dst_param *params)
|
|
+ bool is_input, enum vkd3d_shader_register_type reg_type, struct vsir_dst_operand *params)
|
|
{
|
|
enum vkd3d_shader_type shader_type = sm6->program->shader_version.type;
|
|
enum vkd3d_shader_register_type io_reg_type;
|
|
bool is_patch_constant, is_control_point;
|
|
- struct vkd3d_shader_dst_param *param;
|
|
const struct signature_element *e;
|
|
+ struct vsir_dst_operand *param;
|
|
unsigned int i, count;
|
|
|
|
is_patch_constant = reg_type == VKD3DSPR_PATCHCONST;
|
|
@@ -4234,7 +4402,7 @@ static void sm6_parser_init_signature(struct sm6_parser *sm6, const struct shade
|
|
|
|
static int sm6_parser_init_output_signature(struct sm6_parser *sm6, const struct shader_signature *output_signature)
|
|
{
|
|
- if (!(sm6->output_params = vsir_program_get_dst_params(sm6->program, output_signature->element_count)))
|
|
+ if (!(sm6->output_params = vsir_program_get_dst_operands(sm6->program, output_signature->element_count)))
|
|
{
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
"Failed to allocate output parameters.");
|
|
@@ -4248,7 +4416,7 @@ static int sm6_parser_init_output_signature(struct sm6_parser *sm6, const struct
|
|
|
|
static int sm6_parser_init_input_signature(struct sm6_parser *sm6, const struct shader_signature *input_signature)
|
|
{
|
|
- if (!(sm6->input_params = vsir_program_get_dst_params(sm6->program, input_signature->element_count)))
|
|
+ if (!(sm6->input_params = vsir_program_get_dst_operands(sm6->program, input_signature->element_count)))
|
|
{
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
"Failed to allocate input parameters.");
|
|
@@ -4265,7 +4433,7 @@ static int sm6_parser_init_patch_constant_signature(struct sm6_parser *sm6,
|
|
{
|
|
bool is_input = sm6->program->shader_version.type == VKD3D_SHADER_TYPE_DOMAIN;
|
|
|
|
- if (!(sm6->patch_constant_params = vsir_program_get_dst_params(sm6->program,
|
|
+ if (!(sm6->patch_constant_params = vsir_program_get_dst_operands(sm6->program,
|
|
patch_constant_signature->element_count)))
|
|
{
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
@@ -4295,47 +4463,42 @@ static const struct sm6_value *sm6_parser_next_function_definition(struct sm6_pa
|
|
return &sm6->values[i];
|
|
}
|
|
|
|
-static struct sm6_block *sm6_block_create()
|
|
+struct function_emission_state
|
|
{
|
|
- struct sm6_block *block = vkd3d_calloc(1, sizeof(*block));
|
|
- return block;
|
|
-}
|
|
+ struct sm6_function *function;
|
|
+ const struct dxil_record *record;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ unsigned int temp_idx;
|
|
+
|
|
+ /* Keep track of whether the helper below sm6_parser_function_init()
|
|
+ * already incremented the instruction count or not. Expected to be
|
|
+ * removed once all helpers increment the count. */
|
|
+ bool pushed_instruction;
|
|
+};
|
|
|
|
-static struct sm6_phi *sm6_block_phi_require_space(struct sm6_block *block, struct sm6_parser *sm6)
|
|
+static struct vkd3d_shader_instruction *sm6_parser_add_function_instruction(struct sm6_parser *sm6,
|
|
+ struct function_emission_state *state)
|
|
{
|
|
- struct sm6_phi *phi;
|
|
+ struct sm6_function *function = state->function;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
|
|
- if (!vkd3d_array_reserve((void **)&block->phi, &block->phi_capacity, block->phi_count + 1, sizeof(*block->phi)))
|
|
- {
|
|
- ERR("Failed to allocate phi array.\n");
|
|
+ if (!(ins = shader_instruction_array_append(&function->instructions)))
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
- "Out of memory allocating a phi instruction.");
|
|
- return NULL;
|
|
- }
|
|
- phi = &block->phi[block->phi_count++];
|
|
-
|
|
- phi->incoming = NULL;
|
|
- phi->incoming_capacity = 0;
|
|
- phi->incoming_count = 0;
|
|
+ "Out of memory allocating instruction.");
|
|
|
|
- return phi;
|
|
+ return ins;
|
|
}
|
|
|
|
-struct function_emission_state
|
|
-{
|
|
- struct sm6_block *code_block;
|
|
- struct vkd3d_shader_instruction *ins;
|
|
- unsigned int temp_idx;
|
|
-};
|
|
-
|
|
static bool sm6_parser_emit_reg_composite_construct(struct sm6_parser *sm6,
|
|
const struct vkd3d_shader_register *operand_regs, unsigned int component_count,
|
|
struct function_emission_state *state, struct vkd3d_shader_register *reg);
|
|
|
|
-static void sm6_parser_emit_alloca(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
+static void sm6_parser_emit_alloca(struct sm6_parser *sm6, struct function_emission_state *state)
|
|
{
|
|
+ struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
+ const struct dxil_record *record = state->record;
|
|
const struct sm6_type *type[2], *elem_type;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
const struct sm6_value *size;
|
|
unsigned int i, alignment;
|
|
uint64_t packed_operands;
|
|
@@ -4402,6 +4565,10 @@ static void sm6_parser_emit_alloca(struct sm6_parser *sm6, const struct dxil_rec
|
|
if (packed_operands)
|
|
WARN("Ignoring flags %#"PRIx64".\n", packed_operands);
|
|
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
sm6_parser_declare_indexable_temp(sm6, elem_type, type[0]->u.array.count, alignment, true, 0, ins, dst);
|
|
}
|
|
|
|
@@ -4433,13 +4600,14 @@ static enum vkd3d_shader_opcode map_dx_atomicrmw_op(uint64_t code)
|
|
}
|
|
}
|
|
|
|
-static void sm6_parser_emit_atomicrmw(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct function_emission_state *state, struct sm6_value *dst)
|
|
+static void sm6_parser_emit_atomicrmw(struct sm6_parser *sm6, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst_params;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
+ const struct dxil_record *record = state->record;
|
|
struct vkd3d_shader_register regs[2], reg;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_params;
|
|
struct vkd3d_shader_register coord;
|
|
const struct sm6_value *ptr, *src;
|
|
enum vkd3d_shader_opcode op;
|
|
@@ -4499,12 +4667,18 @@ static void sm6_parser_emit_atomicrmw(struct sm6_parser *sm6, const struct dxil_
|
|
return;
|
|
}
|
|
|
|
- ins = state->ins;
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
vsir_instruction_init(ins, &sm6->p.location, op);
|
|
ins->flags = is_volatile ? VKD3DARF_VOLATILE : 0;
|
|
|
|
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
if (ptr->structure_stride)
|
|
src_param_init_vector_from_reg(&src_params[0], &coord);
|
|
else
|
|
@@ -4513,7 +4687,11 @@ static void sm6_parser_emit_atomicrmw(struct sm6_parser *sm6, const struct dxil_
|
|
|
|
sm6_parser_init_ssa_value(sm6, dst);
|
|
|
|
- dst_params = instruction_dst_params_alloc(ins, 2, sm6);
|
|
+ if (!(dst_params = instruction_dst_params_alloc(ins, 2, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
+ }
|
|
vsir_register_from_dxil_value(&dst_params[0].reg, dst, 0, sm6);
|
|
dst_param_init(&dst_params[0]);
|
|
|
|
@@ -4622,12 +4800,14 @@ static enum vkd3d_shader_opcode map_binary_op(uint64_t code, const struct sm6_ty
|
|
return op;
|
|
}
|
|
|
|
-static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct sm6_block *code_block, struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
+static void sm6_parser_emit_binop(struct sm6_parser *sm6, struct function_emission_state *state)
|
|
{
|
|
+ struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
+ const struct dxil_record *record = state->record;
|
|
enum vkd3d_shader_opcode opcode, aux_opcode;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_params;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_params;
|
|
uint32_t type_flags = 0, aux_id = 0;
|
|
const struct sm6_value *a, *b;
|
|
uint64_t code, flags;
|
|
@@ -4650,11 +4830,17 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
|
|
|
|
if (aux_opcode != VSIR_OP_NOP)
|
|
{
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+
|
|
vsir_instruction_init(ins, &sm6->p.location, aux_opcode);
|
|
|
|
if (!(dst_params = instruction_dst_params_alloc(ins, 1, sm6))
|
|
|| !(src_params = instruction_src_params_alloc(ins, 1, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
|
|
aux_id = sm6_parser_alloc_ssa_id(sm6);
|
|
|
|
@@ -4662,11 +4848,13 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
|
|
|
|
dst_param_init(&dst_params[0]);
|
|
register_init_with_id(&dst_params[0].reg, VKD3DSPR_SSA, src_params[0].reg.data_type, aux_id);
|
|
-
|
|
- ++ins;
|
|
- ++code_block->instruction_count;
|
|
}
|
|
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
vsir_instruction_init(ins, &sm6->p.location, opcode);
|
|
|
|
flags = (record->operand_count > i) ? record->operands[i] : 0;
|
|
@@ -4715,7 +4903,10 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
|
|
}
|
|
|
|
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
|
|
src_param_init_from_value(&src_params[0], a, type_flags, sm6);
|
|
|
|
@@ -4738,62 +4929,92 @@ static void sm6_parser_emit_binop(struct sm6_parser *sm6, const struct dxil_reco
|
|
* do. */
|
|
ins->flags |= VKD3DSI_SHIFT_UNMASKED;
|
|
}
|
|
- instruction_dst_param_init_ssa_scalar(ins, type_flags, sm6);
|
|
+
|
|
+ if (!instruction_dst_param_init_ssa_scalar(ins, type_flags, sm6))
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
}
|
|
|
|
-static const struct sm6_block *sm6_function_get_block(const struct sm6_function *function, uint64_t index,
|
|
- struct sm6_parser *sm6)
|
|
+static bool sm6_function_validate_block_index(const struct sm6_function *function,
|
|
+ uint64_t index, struct sm6_parser *dxil)
|
|
{
|
|
if (index >= function->block_count)
|
|
{
|
|
- WARN("Invalid code block index %#"PRIx64".\n", index);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
"Invalid code block index %#"PRIx64" for a control flow instruction.", index);
|
|
- return NULL;
|
|
+ return false;
|
|
}
|
|
- return function->blocks[index];
|
|
+
|
|
+ return true;
|
|
}
|
|
|
|
-static void sm6_parser_emit_br(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct sm6_function *function, struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
+static void sm6_parser_emit_br(struct sm6_parser *dxil, struct function_emission_state *state)
|
|
{
|
|
+ const struct dxil_record *record = state->record;
|
|
+ struct sm6_function *function = state->function;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_value *value;
|
|
unsigned int i = 2;
|
|
|
|
if (record->operand_count != 1 && record->operand_count < 3)
|
|
{
|
|
- WARN("Invalid operand count %u.\n", record->operand_count);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
"Invalid operand count %u for a branch instruction.", record->operand_count);
|
|
return;
|
|
}
|
|
|
|
if (record->operand_count == 1)
|
|
{
|
|
- code_block->terminator.type = TERMINATOR_UNCOND_BR;
|
|
- code_block->terminator.true_block = sm6_function_get_block(function, record->operands[0], sm6);
|
|
- }
|
|
- else
|
|
- {
|
|
- if (!sm6->bool_type)
|
|
- {
|
|
- WARN("Bool type not found.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_MODULE,
|
|
- "Module does not define a boolean type for conditions.");
|
|
+ if (!sm6_function_validate_block_index(function, record->operands[0], dxil))
|
|
return;
|
|
- }
|
|
- if (!(value = sm6_parser_get_value_by_ref(sm6, record, sm6->bool_type, &i))
|
|
- || !sm6_value_validate_is_bool(value, sm6))
|
|
+
|
|
+ if (!(ins = sm6_parser_add_function_instruction(dxil, state)))
|
|
return;
|
|
- dxil_record_validate_operand_max_count(record, i, sm6);
|
|
|
|
- code_block->terminator.type = TERMINATOR_COND_BR;
|
|
- vsir_register_from_dxil_value(&code_block->terminator.conditional_reg, value, 0, sm6);
|
|
- code_block->terminator.true_block = sm6_function_get_block(function, record->operands[0], sm6);
|
|
- code_block->terminator.false_block = sm6_function_get_block(function, record->operands[1], sm6);
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_BRANCH);
|
|
+ if (!(src_params = instruction_src_params_alloc(ins, 1, dxil)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
+ }
|
|
+ /* Label id is 1-based. */
|
|
+ vsir_src_operand_init_label(&src_params[0], record->operands[0] + 1);
|
|
}
|
|
+ else
|
|
+ {
|
|
+ if (!dxil->bool_type)
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_MODULE,
|
|
+ "Module does not define a boolean type for conditions.");
|
|
+ return;
|
|
+ }
|
|
+ if (!(value = sm6_parser_get_value_by_ref(dxil, record, dxil->bool_type, &i))
|
|
+ || !sm6_value_validate_is_bool(value, dxil))
|
|
+ return;
|
|
+ dxil_record_validate_operand_max_count(record, i, dxil);
|
|
|
|
- ins->opcode = VSIR_OP_NOP;
|
|
+ if (!sm6_function_validate_block_index(function, record->operands[0], dxil)
|
|
+ || !sm6_function_validate_block_index(function, record->operands[1], dxil))
|
|
+ return;
|
|
+
|
|
+ if (!(ins = sm6_parser_add_function_instruction(dxil, state)))
|
|
+ return;
|
|
+
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_BRANCH);
|
|
+ if (!(src_params = instruction_src_params_alloc(ins, 3, dxil)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
+ }
|
|
+ src_param_init_from_value(&src_params[0], value, 0, dxil);
|
|
+ /* Label id is 1-based. */
|
|
+ vsir_src_operand_init_label(&src_params[1], record->operands[0] + 1);
|
|
+ vsir_src_operand_init_label(&src_params[2], record->operands[1] + 1);
|
|
+ }
|
|
}
|
|
|
|
static bool sm6_parser_emit_reg_composite_construct(struct sm6_parser *sm6,
|
|
@@ -4801,8 +5022,8 @@ static bool sm6_parser_emit_reg_composite_construct(struct sm6_parser *sm6,
|
|
struct function_emission_state *state, struct vkd3d_shader_register *reg)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
bool all_constant = true;
|
|
unsigned int i;
|
|
|
|
@@ -4847,7 +5068,7 @@ static bool sm6_parser_emit_reg_composite_construct(struct sm6_parser *sm6,
|
|
}
|
|
|
|
state->ins = ins;
|
|
- state->code_block->instruction_count += component_count;
|
|
+ state->function->instructions.count += component_count;
|
|
|
|
return true;
|
|
}
|
|
@@ -4990,18 +5211,27 @@ static enum vkd3d_shader_opcode map_dx_unary_op(enum dx_intrinsic_opcode op)
|
|
}
|
|
}
|
|
|
|
-static void sm6_parser_emit_dx_unary(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
+static void sm6_parser_emit_dx_unary(struct sm6_parser *dxil, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
- vsir_instruction_init(ins, &sm6->p.location, map_dx_unary_op(op));
|
|
- if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
+ if (!(ins = sm6_parser_add_function_instruction(dxil, state)))
|
|
return;
|
|
- src_param_init_from_value(src_param, operands[0], 0, sm6);
|
|
|
|
- instruction_dst_param_init_ssa_scalar(ins, 0, sm6);
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
+ vsir_instruction_init(ins, &dxil->p.location, map_dx_unary_op(op));
|
|
+ if (!(src_param = instruction_src_params_alloc(ins, 1, dxil)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
+ }
|
|
+ src_param_init_from_value(src_param, operands[0], 0, dxil);
|
|
+
|
|
+ if (!instruction_dst_param_init_ssa_scalar(ins, 0, dxil))
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
}
|
|
|
|
static enum vkd3d_shader_opcode map_dx_binary_op(enum dx_intrinsic_opcode op,
|
|
@@ -5038,7 +5268,7 @@ static void sm6_parser_emit_dx_binary(struct sm6_parser *sm6, enum dx_intrinsic_
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *src_params;
|
|
uint32_t type_flags;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, map_dx_binary_op(op, operands[0]->type, &type_flags));
|
|
@@ -5089,9 +5319,9 @@ static void sm6_parser_emit_dx_atomic_binop(struct sm6_parser *sm6, enum dx_intr
|
|
enum vkd3d_shader_resource_type resource_type;
|
|
bool is_cmp_xchg = op == DX_ATOMIC_CMP_XCHG;
|
|
unsigned int i, coord_idx, coord_count = 1;
|
|
- struct vkd3d_shader_dst_param *dst_params;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_params;
|
|
const struct sm6_value *resource;
|
|
struct vkd3d_shader_register reg;
|
|
enum vkd3d_shader_opcode opcode;
|
|
@@ -5129,11 +5359,19 @@ static void sm6_parser_emit_dx_atomic_binop(struct sm6_parser *sm6, enum dx_intr
|
|
}
|
|
}
|
|
|
|
- ins = state->ins;
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
vsir_instruction_init(ins, &sm6->p.location, opcode);
|
|
|
|
if (!(src_params = instruction_src_params_alloc(ins, 2 + is_cmp_xchg, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
+
|
|
src_param_init_vector_from_reg(&src_params[0], ®);
|
|
if (is_cmp_xchg)
|
|
src_param_init_from_value(&src_params[1], operands[4], 0, sm6);
|
|
@@ -5141,39 +5379,46 @@ static void sm6_parser_emit_dx_atomic_binop(struct sm6_parser *sm6, enum dx_intr
|
|
|
|
sm6_parser_init_ssa_value(sm6, dst);
|
|
|
|
- dst_params = instruction_dst_params_alloc(ins, 2, sm6);
|
|
+ if (!(dst_params = instruction_dst_params_alloc(ins, 2, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
+ }
|
|
+
|
|
dst_param_init(&dst_params[0]);
|
|
vsir_register_from_dxil_value(&dst_params[0].reg, dst, 0, sm6);
|
|
dst_param_init(&dst_params[1]);
|
|
sm6_register_from_handle(sm6, &resource->u.handle, &dst_params[1].reg);
|
|
}
|
|
|
|
-static void sm6_parser_emit_dx_barrier(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
+static void sm6_parser_emit_dx_barrier(struct sm6_parser *dxil, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_instruction *ins = state->ins;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
enum dxil_sync_flags flags;
|
|
|
|
- vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SYNC);
|
|
- flags = sm6_value_get_constant_uint(operands[0], sm6);
|
|
+ if (!(ins = sm6_parser_add_function_instruction(dxil, state)))
|
|
+ return;
|
|
+
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_SYNC);
|
|
+ flags = sm6_value_get_constant_uint(operands[0], dxil);
|
|
ins->flags = flags & (SYNC_THREAD_GROUP | SYNC_THREAD_GROUP_UAV);
|
|
if (flags & SYNC_GLOBAL_UAV)
|
|
ins->flags |= VKD3DSSF_GLOBAL_UAV;
|
|
if (flags & SYNC_GROUP_SHARED_MEMORY)
|
|
ins->flags |= VKD3DSSF_GROUP_SHARED_MEMORY;
|
|
if (flags &= ~(SYNC_THREAD_GROUP | SYNC_GLOBAL_UAV | SYNC_THREAD_GROUP_UAV | SYNC_GROUP_SHARED_MEMORY))
|
|
- {
|
|
- FIXME("Unhandled flags %#x.\n", flags);
|
|
- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
"Barrier flags %#x are unhandled.", flags);
|
|
- }
|
|
}
|
|
|
|
static void sm6_parser_emit_dx_buffer_update_counter(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_value *resource;
|
|
unsigned int i;
|
|
int8_t inc;
|
|
@@ -5209,8 +5454,8 @@ static void sm6_parser_emit_dx_calculate_lod(struct sm6_parser *sm6, enum dx_int
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
const struct sm6_value *resource, *sampler;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
struct vkd3d_shader_register coord;
|
|
unsigned int clamp;
|
|
|
|
@@ -5244,7 +5489,7 @@ static void sm6_parser_emit_dx_cbuffer_load(struct sm6_parser *sm6, enum dx_intr
|
|
{
|
|
struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
const struct sm6_value *buffer;
|
|
const struct sm6_type *type;
|
|
|
|
@@ -5276,8 +5521,8 @@ static void sm6_parser_emit_dx_cbuffer_load(struct sm6_parser *sm6, enum dx_intr
|
|
static void sm6_parser_dcl_register_builtin(struct sm6_parser *dxil, enum vkd3d_shader_opcode opcode,
|
|
enum vkd3d_shader_register_type reg_type, enum vsir_data_type data_type, unsigned int component_count)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
|
|
if (!bitmap_is_set(dxil->io_regs_declared, reg_type))
|
|
{
|
|
@@ -5293,7 +5538,7 @@ static void sm6_parser_dcl_register_builtin(struct sm6_parser *dxil, enum vkd3d_
|
|
static void sm6_parser_emit_dx_input_register_mov(struct sm6_parser *sm6, struct vkd3d_shader_instruction *ins,
|
|
enum vkd3d_shader_register_type reg_type, enum vsir_data_type data_type, bool scalar)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
|
|
|
|
@@ -5372,7 +5617,7 @@ static void sm6_parser_emit_dx_stream(struct sm6_parser *sm6, enum dx_intrinsic_
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
unsigned int i;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, (op == DX_CUT_STREAM) ? VSIR_OP_CUT_STREAM : VSIR_OP_EMIT_STREAM);
|
|
@@ -5394,7 +5639,7 @@ static void sm6_parser_emit_dx_stream(struct sm6_parser *sm6, enum dx_intrinsic_
|
|
if (op == DX_EMIT_THEN_CUT_STREAM)
|
|
{
|
|
++state->ins;
|
|
- ++state->code_block->instruction_count;
|
|
+ ++state->function->instructions.count;
|
|
sm6_parser_emit_dx_stream(sm6, DX_CUT_STREAM, operands, state);
|
|
}
|
|
}
|
|
@@ -5403,7 +5648,7 @@ static void sm6_parser_emit_dx_discard(struct sm6_parser *sm6, enum dx_intrinsic
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_DISCARD);
|
|
|
|
@@ -5415,7 +5660,7 @@ static void sm6_parser_emit_dx_domain_location(struct sm6_parser *sm6, enum dx_i
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
unsigned int component_idx;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
|
|
@@ -5441,9 +5686,9 @@ static void sm6_parser_emit_dx_domain_location(struct sm6_parser *sm6, enum dx_i
|
|
static void sm6_parser_emit_dx_dot(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
struct vkd3d_shader_register regs[2];
|
|
+ struct vsir_src_operand *src_params;
|
|
enum vkd3d_shader_opcode opcode;
|
|
unsigned int component_count;
|
|
|
|
@@ -5484,9 +5729,9 @@ static void sm6_parser_emit_dx_eval_attrib(struct sm6_parser *sm6, enum dx_intri
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
const struct shader_signature *signature;
|
|
unsigned int row_index, column_index;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct signature_element *e;
|
|
|
|
row_index = sm6_value_get_constant_uint(operands[0], sm6);
|
|
@@ -5531,7 +5776,7 @@ static void sm6_parser_emit_dx_fabs(struct sm6_parser *sm6, enum dx_intrinsic_op
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_ABS);
|
|
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
@@ -5546,8 +5791,8 @@ static void sm6_parser_emit_dx_compute_builtin(struct sm6_parser *sm6, enum dx_i
|
|
{
|
|
unsigned int component_count = 3, component_idx = 0;
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
enum vkd3d_shader_register_type reg_type;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
switch (op)
|
|
{
|
|
@@ -5601,7 +5846,7 @@ static void sm6_parser_emit_dx_ma(struct sm6_parser *sm6, enum dx_intrinsic_opco
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *src_params;
|
|
unsigned int i;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, sm6_dx_map_ma_op(op, operands[0]->type));
|
|
@@ -5619,11 +5864,11 @@ static void sm6_parser_emit_dx_get_dimensions(struct sm6_parser *sm6, enum dx_in
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
unsigned int is_texture, component_count;
|
|
enum dxil_resource_kind resource_kind;
|
|
- struct vkd3d_shader_dst_param *dst;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_value *resource;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
resource = operands[0];
|
|
if (!sm6_value_validate_is_handle(resource, sm6))
|
|
@@ -5672,7 +5917,7 @@ static void sm6_parser_emit_dx_get_dimensions(struct sm6_parser *sm6, enum dx_in
|
|
src_param_init_vector_from_reg(&src_params[0], &dst->reg);
|
|
|
|
state->ins = ins;
|
|
- state->code_block->instruction_count += 2;
|
|
+ state->function->instructions.count += 2;
|
|
}
|
|
}
|
|
else
|
|
@@ -5706,7 +5951,7 @@ static void sm6_parser_emit_dx_tertiary(struct sm6_parser *sm6, enum dx_intrinsi
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *src_params;
|
|
unsigned int i;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, sm6_dx_map_tertiary_op(op));
|
|
@@ -5728,9 +5973,9 @@ static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, enum dx_intrin
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
struct vsir_program *program = sm6->program;
|
|
unsigned int count, row_index, column_index;
|
|
- const struct vkd3d_shader_dst_param *params;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
const struct shader_signature *signature;
|
|
+ const struct vsir_dst_operand *params;
|
|
+ struct vsir_src_operand *src_param;
|
|
const struct signature_element *e;
|
|
|
|
row_index = sm6_value_get_constant_uint(operands[0], sm6);
|
|
@@ -5791,8 +6036,8 @@ static void sm6_parser_emit_dx_load_input(struct sm6_parser *sm6, enum dx_intrin
|
|
static void sm6_parser_emit_dx_make_double(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
struct vkd3d_shader_register reg;
|
|
|
|
if (!sm6_parser_emit_composite_construct(sm6, &operands[0], 2, state, ®))
|
|
@@ -5839,7 +6084,7 @@ static void sm6_parser_emit_dx_quad_op(struct sm6_parser *sm6, enum dx_intrinsic
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
enum vkd3d_shader_opcode opcode;
|
|
enum dxil_quad_op_kind quad_op;
|
|
|
|
@@ -5865,8 +6110,8 @@ static void sm6_parser_emit_dx_raw_buffer_load(struct sm6_parser *sm6, enum dx_i
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
unsigned int operand_count, write_mask, component_count = VKD3D_VEC4_SIZE;
|
|
- struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_value *resource;
|
|
bool raw;
|
|
|
|
@@ -5880,37 +6125,46 @@ static void sm6_parser_emit_dx_raw_buffer_load(struct sm6_parser *sm6, enum dx_i
|
|
write_mask = sm6_value_get_constant_uint(operands[3], sm6);
|
|
if (!write_mask || write_mask > VKD3DSP_WRITEMASK_ALL)
|
|
{
|
|
- WARN("Invalid write mask %#x.\n", write_mask);
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
"Write mask %#x for a raw/structured buffer load operation is invalid.", write_mask);
|
|
return;
|
|
}
|
|
else if (write_mask & (write_mask + 1))
|
|
{
|
|
- FIXME("Unhandled write mask %#x.\n", write_mask);
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
"Write mask %#x for a raw/structured buffer load operation is unhandled.", write_mask);
|
|
}
|
|
component_count = vsir_write_mask_component_count(write_mask);
|
|
}
|
|
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+
|
|
+ state->pushed_instruction = true;
|
|
+
|
|
instruction_init_with_resource(ins, raw ? VSIR_OP_LD_RAW : VSIR_OP_LD_STRUCTURED, resource, sm6);
|
|
operand_count = 2 + !raw;
|
|
+
|
|
if (!(src_params = instruction_src_params_alloc(ins, operand_count, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
+
|
|
src_params_init_from_operands(src_params, &operands[1], operand_count - 1, sm6);
|
|
src_param_init_vector_from_handle(sm6, &src_params[operand_count - 1], &resource->u.handle);
|
|
|
|
- instruction_dst_param_init_ssa_vector(ins, component_count, sm6);
|
|
+ if (!instruction_dst_param_init_ssa_vector(ins, component_count, sm6))
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
}
|
|
|
|
static void sm6_parser_emit_dx_raw_buffer_store(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
unsigned int write_mask, component_count, alignment = 0, operand_count;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
struct vkd3d_shader_register data;
|
|
const struct sm6_value *resource;
|
|
bool raw;
|
|
@@ -5977,8 +6231,8 @@ static void sm6_parser_emit_dx_raw_buffer_store(struct sm6_parser *sm6, enum dx_
|
|
static void sm6_parser_emit_dx_buffer_load(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_value *resource;
|
|
|
|
resource = operands[0];
|
|
@@ -5987,43 +6241,46 @@ static void sm6_parser_emit_dx_buffer_load(struct sm6_parser *sm6, enum dx_intri
|
|
|
|
if (resource->u.handle.d->kind == RESOURCE_KIND_RAWBUFFER
|
|
|| resource->u.handle.d->kind == RESOURCE_KIND_STRUCTUREDBUFFER)
|
|
- {
|
|
return sm6_parser_emit_dx_raw_buffer_load(sm6, op, operands, state);
|
|
- }
|
|
|
|
if (resource->u.handle.d->kind != RESOURCE_KIND_TYPEDBUFFER)
|
|
- {
|
|
- WARN("Resource is not a typed buffer.\n");
|
|
vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_INVALID_OPERATION,
|
|
"Resource for a typed buffer load is not a typed buffer.");
|
|
- }
|
|
+
|
|
+ if (!(ins = sm6_parser_add_function_instruction(sm6, state)))
|
|
+ return;
|
|
+
|
|
+ state->pushed_instruction = true;
|
|
|
|
instruction_init_with_resource(ins, (resource->u.handle.d->type == VKD3D_SHADER_DESCRIPTOR_TYPE_UAV)
|
|
? VSIR_OP_LD_UAV_TYPED : VSIR_OP_LD, resource, sm6);
|
|
|
|
if (!(src_params = instruction_src_params_alloc(ins, 2, sm6)))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
+
|
|
src_param_init_from_value(&src_params[0], operands[1], 0, sm6);
|
|
+ /* Constant zero would be ok, but is not worth checking for unless it
|
|
+ * shows up. */
|
|
if (!sm6_value_is_undef(operands[2]))
|
|
- {
|
|
- /* Constant zero would be ok, but is not worth checking for unless it shows up. */
|
|
- WARN("Ignoring structure offset.\n");
|
|
vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_OPERANDS,
|
|
"Ignoring structure offset for a typed buffer load.");
|
|
- }
|
|
src_param_init_vector_from_handle(sm6, &src_params[1], &resource->u.handle);
|
|
|
|
- instruction_dst_param_init_ssa_vector(ins, VKD3D_VEC4_SIZE, sm6);
|
|
+ if (!instruction_dst_param_init_ssa_vector(ins, VKD3D_VEC4_SIZE, sm6))
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
}
|
|
|
|
static void sm6_parser_emit_dx_buffer_store(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
unsigned int write_mask, component_count;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
struct vkd3d_shader_register texel;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
const struct sm6_value *resource;
|
|
|
|
resource = operands[0];
|
|
@@ -6087,7 +6344,7 @@ static void sm6_parser_emit_dx_get_sample_count(struct sm6_parser *sm6, enum dx_
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SAMPLE_INFO);
|
|
ins->flags = VKD3DSI_SAMPLE_INFO_UINT;
|
|
@@ -6106,8 +6363,8 @@ static void sm6_parser_emit_dx_get_sample_pos(struct sm6_parser *sm6, enum dx_in
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
const struct sm6_value *resource = NULL;
|
|
+ struct vsir_src_operand *src_params;
|
|
|
|
if (op == DX_TEX2DMS_GET_SAMPLE_POS)
|
|
{
|
|
@@ -6158,8 +6415,8 @@ static void sm6_parser_emit_dx_sample(struct sm6_parser *sm6, enum dx_intrinsic_
|
|
unsigned int clamp_idx = 0, component_count = VKD3D_VEC4_SIZE;
|
|
struct vkd3d_shader_register coord, ddx, ddy;
|
|
const struct sm6_value *resource, *sampler;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
|
|
resource = operands[0];
|
|
sampler = operands[1];
|
|
@@ -6241,7 +6498,7 @@ static void sm6_parser_emit_dx_sample_index(struct sm6_parser *sm6, enum dx_intr
|
|
{
|
|
const struct shader_signature *signature = &sm6->program->input_signature;
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
unsigned int element_idx;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
|
|
@@ -6268,7 +6525,7 @@ static void sm6_parser_emit_dx_saturate(struct sm6_parser *sm6, enum dx_intrinsi
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_SATURATE);
|
|
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
@@ -6282,7 +6539,7 @@ static void sm6_parser_emit_dx_split_double(struct sm6_parser *sm6, enum dx_intr
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_MOV);
|
|
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
@@ -6298,10 +6555,10 @@ static void sm6_parser_emit_dx_store_output(struct sm6_parser *sm6, enum dx_intr
|
|
bool is_patch_constant = op == DX_STORE_PATCH_CONSTANT;
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
struct vsir_program *program = sm6->program;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
const struct shader_signature *signature;
|
|
unsigned int row_index, column_index;
|
|
+ struct vsir_src_operand *src_param;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
const struct signature_element *e;
|
|
const struct sm6_value *value;
|
|
|
|
@@ -6360,8 +6617,8 @@ static void sm6_parser_emit_dx_texture_gather(struct sm6_parser *sm6, enum dx_in
|
|
{
|
|
struct vkd3d_shader_register coord, offset;
|
|
const struct sm6_value *resource, *sampler;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
unsigned int swizzle;
|
|
bool extended_offset;
|
|
|
|
@@ -6426,8 +6683,8 @@ static void sm6_parser_emit_dx_texture_load(struct sm6_parser *sm6, enum dx_intr
|
|
{
|
|
const struct sm6_value *resource, *mip_level_or_sample_count;
|
|
enum vkd3d_shader_resource_type resource_type;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
struct vkd3d_shader_register coord;
|
|
bool is_multisample, is_uav;
|
|
unsigned int i;
|
|
@@ -6470,10 +6727,10 @@ static void sm6_parser_emit_dx_texture_store(struct sm6_parser *sm6, enum dx_int
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_register coord, texel;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
unsigned int write_mask, component_count;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
const struct sm6_value *resource;
|
|
|
|
resource = operands[0];
|
|
@@ -6520,7 +6777,7 @@ static void sm6_parser_emit_dx_wave_active_ballot(struct sm6_parser *sm6, enum d
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
vsir_instruction_init(ins, &sm6->p.location, VSIR_OP_WAVE_ACTIVE_BALLOT);
|
|
if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
@@ -6553,8 +6810,8 @@ static void sm6_parser_emit_dx_wave_active_bit(struct sm6_parser *sm6, enum dx_i
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
enum dxil_wave_bit_op_kind wave_op;
|
|
+ struct vsir_src_operand *src_param;
|
|
enum vkd3d_shader_opcode opcode;
|
|
|
|
wave_op = sm6_value_get_constant_uint(operands[1], sm6);
|
|
@@ -6599,7 +6856,7 @@ static void sm6_parser_emit_dx_wave_op(struct sm6_parser *sm6, enum dx_intrinsic
|
|
const struct sm6_value **operands, struct function_emission_state *state)
|
|
{
|
|
struct vkd3d_shader_instruction *ins = state->ins;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
enum vkd3d_shader_opcode opcode;
|
|
enum dxil_wave_op_kind wave_op;
|
|
bool is_signed;
|
|
@@ -6828,13 +7085,13 @@ static bool sm6_parser_validate_operand_type(struct sm6_parser *sm6, const struc
|
|
case 'g':
|
|
return sm6_type_is_floating_point(type);
|
|
case 'H':
|
|
- return sm6_value_is_handle(value) && type == sm6->handle_type;
|
|
+ return sm6_value_is_handle(value) && type->u.struc->well_known == WELL_KNOWN_HANDLE;
|
|
case 'D':
|
|
- return sm6_type_is_struct(type) && !strcmp(type->u.struc->name, "dx.types.Dimensions");
|
|
+ return sm6_type_is_struct(type) && type->u.struc->well_known == WELL_KNOWN_DIMENSIONS;
|
|
case 'S':
|
|
- return sm6_type_is_struct(type) && !strcmp(type->u.struc->name, "dx.types.splitdouble");
|
|
+ return sm6_type_is_struct(type) && type->u.struc->well_known == WELL_KNOWN_SPLITDOUBLE;
|
|
case 'V':
|
|
- return sm6_type_is_struct(type) && !strcmp(type->u.struc->name, "dx.types.fouri32");
|
|
+ return sm6_type_is_struct(type) && type->u.struc->well_known == WELL_KNOWN_FOURI32;
|
|
case 'v':
|
|
return sm6_value_is_invalid(value) && !type;
|
|
case 'o':
|
|
@@ -6878,35 +7135,19 @@ static bool sm6_parser_validate_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_
|
|
return true;
|
|
}
|
|
|
|
-static void sm6_parser_emit_unhandled(struct sm6_parser *sm6, struct vkd3d_shader_instruction *ins,
|
|
- struct sm6_value *dst)
|
|
-{
|
|
- ins->opcode = VSIR_OP_NOP;
|
|
-
|
|
- if (!dst->type)
|
|
- return;
|
|
-
|
|
- dst->value_type = VALUE_TYPE_INVALID;
|
|
-}
|
|
-
|
|
static void sm6_parser_decode_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_opcode op,
|
|
const char *name, const struct sm6_value **operands, unsigned int operand_count,
|
|
struct function_emission_state *state, struct sm6_value *dst)
|
|
{
|
|
if (op >= ARRAY_SIZE(sm6_dx_op_table) || !sm6_dx_op_table[op].operand_info)
|
|
{
|
|
- FIXME("Unhandled dx intrinsic function id %u, '%s'.\n", op, name);
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_UNHANDLED_INTRINSIC,
|
|
"Call to intrinsic function %s is unhandled.", name);
|
|
- sm6_parser_emit_unhandled(sm6, state->ins, dst);
|
|
return;
|
|
}
|
|
|
|
if (!sm6_parser_validate_dx_op(sm6, op, name, operands, operand_count, dst))
|
|
- {
|
|
- sm6_parser_emit_unhandled(sm6, state->ins, dst);
|
|
return;
|
|
- }
|
|
|
|
sm6_dx_op_table[op].handler(sm6, op, operands, state);
|
|
|
|
@@ -6920,10 +7161,11 @@ static void sm6_parser_decode_dx_op(struct sm6_parser *sm6, enum dx_intrinsic_op
|
|
}
|
|
}
|
|
|
|
-static void sm6_parser_emit_call(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct function_emission_state *state, struct sm6_value *dst)
|
|
+static void sm6_parser_emit_call(struct sm6_parser *sm6, struct function_emission_state *state)
|
|
{
|
|
+ struct sm6_value *dst = sm6_parser_get_current_value(sm6);
|
|
const struct sm6_value *operands[DXIL_OP_MAX_OPERANDS];
|
|
+ const struct dxil_record *record = state->record;
|
|
const struct sm6_value *fn_value, *op_value;
|
|
unsigned int i = 1, j, operand_count;
|
|
const struct sm6_type *type = NULL;
|
|
@@ -7122,7 +7364,7 @@ static enum vkd3d_shader_opcode dxil_map_cast_op(uint64_t code, const struct sm6
|
|
static void sm6_parser_emit_cast(struct sm6_parser *dxil, const struct dxil_record *record,
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
const struct sm6_value *value;
|
|
enum vkd3d_shader_opcode op;
|
|
const struct sm6_type *type;
|
|
@@ -7219,8 +7461,8 @@ static const struct sm6_cmp_info *sm6_map_cmp2_op(uint64_t code)
|
|
static void sm6_parser_emit_cmp2(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
const struct sm6_type *type_a, *type_b;
|
|
+ struct vsir_src_operand *src_params;
|
|
bool is_int, is_fp, silence_warning;
|
|
const struct sm6_cmp_info *cmp;
|
|
const struct sm6_value *a, *b;
|
|
@@ -7321,9 +7563,9 @@ static void sm6_parser_emit_cmpxchg(struct sm6_parser *sm6, const struct dxil_re
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
uint64_t success_ordering, failure_ordering;
|
|
- struct vkd3d_shader_dst_param *dst_params;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
const struct sm6_value *ptr, *cmp, *new;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_params;
|
|
struct vkd3d_shader_register reg;
|
|
unsigned int i = 0;
|
|
bool is_volatile;
|
|
@@ -7407,7 +7649,7 @@ static void sm6_parser_emit_cmpxchg(struct sm6_parser *sm6, const struct dxil_re
|
|
static void sm6_parser_emit_extractval(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src_param;
|
|
const struct sm6_type *type;
|
|
const struct sm6_value *src;
|
|
unsigned int i = 0;
|
|
@@ -7567,7 +7809,7 @@ static void sm6_parser_emit_load(struct sm6_parser *sm6, const struct dxil_recor
|
|
{
|
|
const struct sm6_type *elem_type = NULL, *pointee_type;
|
|
unsigned int alignment, operand_count, i = 0;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *src_params;
|
|
struct vkd3d_shader_register reg;
|
|
const struct sm6_value *ptr;
|
|
uint64_t alignment_code;
|
|
@@ -7646,117 +7888,126 @@ static int phi_incoming_compare(const void *a, const void *b)
|
|
{
|
|
const struct incoming_value *incoming_a = a, *incoming_b = b;
|
|
|
|
- return (incoming_a->block > incoming_b->block) - (incoming_a->block < incoming_b->block);
|
|
+ return vkd3d_u32_compare(incoming_a->block_idx, incoming_b->block_idx);
|
|
}
|
|
|
|
-static void sm6_parser_emit_phi(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct sm6_function *function, struct sm6_block *code_block, struct vkd3d_shader_instruction *ins,
|
|
- struct sm6_value *dst)
|
|
+static void sm6_parser_emit_phi(struct sm6_parser *dxil, const struct dxil_record *record,
|
|
+ struct sm6_function *function, struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
+ struct vsir_src_operand *src_params;
|
|
+ unsigned int i, j, incoming_count;
|
|
struct incoming_value *incoming;
|
|
+ const struct sm6_value *src;
|
|
const struct sm6_type *type;
|
|
- struct sm6_phi *phi;
|
|
- unsigned int i, j;
|
|
- uint64_t src_idx;
|
|
|
|
if (!(record->operand_count & 1))
|
|
{
|
|
- WARN("Invalid operand count %u.\n", record->operand_count);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
"Invalid operand count %u for phi instruction.", record->operand_count);
|
|
return;
|
|
}
|
|
|
|
- if (!(type = sm6_parser_get_type(sm6, record->operands[0])))
|
|
+ if (!(type = sm6_parser_get_type(dxil, record->operands[0])))
|
|
return;
|
|
if (!sm6_type_is_numeric(type))
|
|
{
|
|
/* dxc doesn't seem to use buffer/resource read return types here. */
|
|
- FIXME("Only scalar numeric types are supported.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
"Result type class %u of a phi instruction is not scalar numeric.", type->class);
|
|
return;
|
|
}
|
|
|
|
dst->type = type;
|
|
- sm6_parser_init_ssa_value(sm6, dst);
|
|
-
|
|
- if (!(phi = sm6_block_phi_require_space(code_block, sm6)))
|
|
- return;
|
|
- phi->incoming_count = record->operand_count / 2u;
|
|
|
|
- if (!vkd3d_array_reserve((void **)&phi->incoming, &phi->incoming_capacity, phi->incoming_count,
|
|
- sizeof(*phi->incoming)))
|
|
+ incoming_count = record->operand_count / 2u;
|
|
+ if (!(incoming = vkd3d_calloc(incoming_count, sizeof(*incoming))))
|
|
{
|
|
- ERR("Failed to allocate phi incoming array.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
"Out of memory allocating a phi incoming array.");
|
|
return;
|
|
}
|
|
- incoming = phi->incoming;
|
|
|
|
for (i = 1; i < record->operand_count; i += 2)
|
|
{
|
|
- src_idx = sm6->value_count - decode_rotated_signed_value(record->operands[i]);
|
|
- /* May be a forward reference. */
|
|
- if (src_idx >= sm6->cur_max_value)
|
|
+ /* Phi forward references are handled by the same mechanism as all
|
|
+ * others. Constant and undefined values are never forward references,
|
|
+ * and the only other valid incoming is an SSA value, which will be
|
|
+ * initialised if necessary. */
|
|
+ if (!(src = sm6_parser_get_value_by_rotated_signed_idx(dxil, record->operands[i], type)))
|
|
+ goto done;
|
|
+
|
|
+ if (!sm6_value_is_constant(src) && !sm6_value_is_undef(src) && !sm6_value_is_ssa(src))
|
|
{
|
|
- WARN("Invalid value index %"PRIu64".\n", src_idx);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
- "Invalid value index %"PRIu64" for a phi incoming value.", src_idx);
|
|
- return;
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
+ "A PHI incoming value is not a constant or SSA register.");
|
|
+ goto done;
|
|
}
|
|
|
|
+ if (src->type != type)
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
+ "The type of a phi incoming value does not match the result type.");
|
|
+
|
|
+ if (!sm6_function_validate_block_index(function, record->operands[i + 1], dxil))
|
|
+ goto done;
|
|
+
|
|
j = i / 2u;
|
|
- /* Store the value index in the register for later resolution. */
|
|
- incoming[j].reg.idx[0].offset = src_idx;
|
|
- incoming[j].block = sm6_function_get_block(function, record->operands[i + 1], sm6);
|
|
+ incoming[j].src = src;
|
|
+ incoming[j].block_idx = record->operands[i + 1];
|
|
}
|
|
|
|
- ins->opcode = VSIR_OP_NOP;
|
|
-
|
|
- qsort(incoming, phi->incoming_count, sizeof(*incoming), phi_incoming_compare);
|
|
+ qsort(incoming, incoming_count, sizeof(*incoming), phi_incoming_compare);
|
|
|
|
- for (i = 1, j = 1; i < phi->incoming_count; ++i)
|
|
+ /* Deduplicate incomings. DXIL phi instructions can contain duplicates. */
|
|
+ for (i = 1, j = 1; i < incoming_count; ++i)
|
|
{
|
|
- if (incoming[i].block != incoming[i - 1].block)
|
|
+ if (incoming[i].block_idx != incoming[i - 1].block_idx)
|
|
{
|
|
incoming[j++] = incoming[i];
|
|
continue;
|
|
}
|
|
|
|
- if (incoming[i].reg.idx[0].offset != incoming[i - 1].reg.idx[0].offset)
|
|
- {
|
|
- WARN("PHI conflict.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
+ if (incoming[i].src != incoming[i - 1].src)
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
"Two phi incomings have the same block but different values.");
|
|
- }
|
|
}
|
|
- /* if (j == 1) we should be able to set dst->u.reg to incoming[0].reg, but structurisation
|
|
- * may potentially add new incomings. */
|
|
- phi->incoming_count = j;
|
|
+ incoming_count = j;
|
|
+
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_PHI);
|
|
+ if (!(src_params = instruction_src_params_alloc(ins, incoming_count * 2u, dxil)))
|
|
+ goto done;
|
|
+
|
|
+ for (i = 0; i < incoming_count; ++i)
|
|
+ {
|
|
+ j = i * 2u;
|
|
+ src_param_init_from_value(&src_params[j], incoming[i].src, 0, dxil);
|
|
+ vsir_src_operand_init_label(&src_params[j + 1], incoming[i].block_idx + 1);
|
|
+ }
|
|
+
|
|
+ instruction_dst_param_init_ssa_scalar(ins, 0, dxil);
|
|
+
|
|
+done:
|
|
+ vkd3d_free(incoming);
|
|
}
|
|
|
|
-static void sm6_parser_emit_ret(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
+static void sm6_parser_emit_ret(struct sm6_parser *dxil,
|
|
+ const struct dxil_record *record, struct vkd3d_shader_instruction *ins)
|
|
{
|
|
- if (!dxil_record_validate_operand_count(record, 0, 1, sm6))
|
|
+ if (!dxil_record_validate_operand_count(record, 0, 1, dxil))
|
|
return;
|
|
|
|
if (record->operand_count)
|
|
- FIXME("Non-void return is not implemented.\n");
|
|
-
|
|
- code_block->terminator.type = TERMINATOR_RET;
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
+ "Non-void return is not implemented.");
|
|
|
|
- ins->opcode = VSIR_OP_NOP;
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_RET);
|
|
}
|
|
|
|
static void sm6_parser_emit_store(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
unsigned int i = 0, alignment, operand_count;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
+ struct vsir_src_operand *src_params;
|
|
+ struct vsir_dst_operand *dst_param;
|
|
const struct sm6_value *ptr, *src;
|
|
struct vkd3d_shader_register reg;
|
|
uint64_t alignment_code;
|
|
@@ -7831,93 +8082,104 @@ static void sm6_parser_emit_store(struct sm6_parser *sm6, const struct dxil_reco
|
|
dst_param->reg.idx_count = 1;
|
|
}
|
|
|
|
-static void sm6_parser_emit_switch(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
- struct sm6_function *function, struct sm6_block *code_block, struct vkd3d_shader_instruction *ins)
|
|
+static void sm6_parser_emit_switch(struct sm6_parser *dxil, const struct dxil_record *record,
|
|
+ struct sm6_function *function, struct vkd3d_shader_instruction *ins)
|
|
{
|
|
- struct sm6_block_terminator *terminator = &code_block->terminator;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_type *type;
|
|
const struct sm6_value *src;
|
|
- unsigned int i = 1, j;
|
|
+ uint64_t case_value;
|
|
+ unsigned int i = 1;
|
|
|
|
if (record->operand_count < 3 || !(record->operand_count & 1))
|
|
{
|
|
- WARN("Invalid operand count %u.\n", record->operand_count);
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND_COUNT,
|
|
"Invalid operand count %u for a switch instruction.", record->operand_count);
|
|
return;
|
|
}
|
|
|
|
- if (!(type = sm6_parser_get_type(sm6, record->operands[0])))
|
|
+ if (!(type = sm6_parser_get_type(dxil, record->operands[0])))
|
|
return;
|
|
|
|
- if (!(src = sm6_parser_get_value_by_ref(sm6, record, type, &i))
|
|
- || !sm6_value_validate_is_register(src, sm6))
|
|
+ if (!(src = sm6_parser_get_value_by_ref(dxil, record, type, &i))
|
|
+ || !sm6_value_validate_is_register(src, dxil))
|
|
return;
|
|
VKD3D_ASSERT(i == 2);
|
|
|
|
if (src->type != type)
|
|
- {
|
|
- WARN("Type mismatch.\n");
|
|
- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
"The type of a switch selector value does not match the selector type.");
|
|
- }
|
|
+
|
|
if (!sm6_type_is_integer(type))
|
|
{
|
|
- WARN("Selector is not scalar integer.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
"Selector type class %u of a switch instruction is not scalar integer.", type->class);
|
|
return;
|
|
}
|
|
|
|
- vsir_register_from_dxil_value(&terminator->conditional_reg, src, 0, sm6);
|
|
- terminator->type = TERMINATOR_SWITCH;
|
|
+ if (!sm6_function_validate_block_index(function, record->operands[2], dxil))
|
|
+ return;
|
|
|
|
- terminator->case_count = record->operand_count / 2u;
|
|
- if (!(terminator->cases = vkd3d_calloc(terminator->case_count, sizeof(*terminator->cases))))
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_SWITCH_MONOLITHIC);
|
|
+ if (!(src_params = instruction_src_params_alloc(ins, record->operand_count, dxil)))
|
|
{
|
|
- ERR("Failed to allocate case array.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
- "Out of memory allocating a switch case array.");
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
}
|
|
|
|
- /* Executes 'operand_count / 2' times because operand_count is uneven. */
|
|
- for (; i < record->operand_count; i += 2)
|
|
- {
|
|
- j = i / 2u - 1;
|
|
- terminator->cases[j].block = sm6_function_get_block(function, record->operands[i], sm6);
|
|
- /* For structurisation it is convenient to store the default in the case array. */
|
|
- terminator->cases[j].is_default = !j;
|
|
- }
|
|
+ src_param_init_from_value(&src_params[0], src, 0, dxil);
|
|
+ /* Set the default block label id, 1-based. */
|
|
+ vsir_src_operand_init_label(&src_params[1], record->operands[2] + 1);
|
|
+ /* Set a zero merge block label id as a placeholder until it is set during
|
|
+ * the structurisation pass. */
|
|
+ vsir_src_operand_init_label(&src_params[2], 0);
|
|
|
|
for (i = 3; i < record->operand_count; i += 2)
|
|
{
|
|
- if (!(src = sm6_parser_get_value_safe(sm6, record->operands[i])))
|
|
+ if (!(src = sm6_parser_get_value_safe(dxil, record->operands[i])))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
return;
|
|
+ }
|
|
|
|
if (src->type != type)
|
|
- {
|
|
- WARN("Type mismatch.\n");
|
|
- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
"The type of a switch case value does not match the selector type.");
|
|
- }
|
|
if (!sm6_value_is_constant(src))
|
|
- {
|
|
- WARN("Case value is not a constant.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
"A switch case value is not a constant.");
|
|
+
|
|
+ case_value = sm6_value_get_constant_uint64(src, dxil);
|
|
+
|
|
+ /* Set the case constant value. 64-bit values are supported. */
|
|
+ if (src_params[0].reg.data_type == VSIR_DATA_U64)
|
|
+ {
|
|
+ vsir_src_operand_init(&src_params[i], VKD3DSPR_IMMCONST64, VSIR_DATA_U64, 0);
|
|
+ src_params[i].reg.u.immconst_u64[0] = case_value;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (case_value > UINT_MAX)
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
+ "Truncating 64-bit switch case value %"PRIx64" to 32 bits.", case_value);
|
|
+ vsir_src_operand_init(&src_params[i], VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
|
|
+ src_params[i].reg.u.immconst_u32[0] = case_value;
|
|
}
|
|
|
|
- terminator->cases[i / 2u].value = sm6_value_get_constant_uint64(src, sm6);
|
|
+ if (!sm6_function_validate_block_index(function, record->operands[i + 1], dxil))
|
|
+ {
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
+ }
|
|
+ /* Set the case block label id, 1-based. */
|
|
+ vsir_src_operand_init_label(&src_params[i + 1], record->operands[i + 1] + 1);
|
|
}
|
|
-
|
|
- ins->opcode = VSIR_OP_NOP;
|
|
}
|
|
|
|
static void sm6_parser_emit_vselect(struct sm6_parser *sm6, const struct dxil_record *record,
|
|
struct vkd3d_shader_instruction *ins, struct sm6_value *dst)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *src_params;
|
|
const struct sm6_value *src[3];
|
|
unsigned int i = 0;
|
|
|
|
@@ -8232,72 +8494,24 @@ static void metadata_attachment_record_apply(const struct dxil_record *record, e
|
|
}
|
|
}
|
|
|
|
-static bool sm6_function_blocks_reserve(struct sm6_function *function, unsigned int reserve)
|
|
+static void sm6_function_emit_label(struct sm6_function *function, unsigned int label_id, struct sm6_parser *dxil)
|
|
{
|
|
- if (!vkd3d_array_reserve((void **)&function->blocks, &function->block_capacity,
|
|
- reserve, sizeof(*function->blocks)))
|
|
- {
|
|
- ERR("Failed to allocate code block array.\n");
|
|
- return false;
|
|
- }
|
|
- return true;
|
|
-}
|
|
-
|
|
-static struct sm6_block *sm6_function_create_block(struct sm6_function *function)
|
|
-{
|
|
- struct sm6_block *block;
|
|
-
|
|
- if (!(block = sm6_block_create()))
|
|
- return NULL;
|
|
-
|
|
- function->blocks[function->block_count++] = block;
|
|
- /* Set the id to the array index + 1. */
|
|
- block->id = function->block_count;
|
|
-
|
|
- return block;
|
|
-}
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src_param;
|
|
|
|
-static enum vkd3d_result sm6_function_resolve_phi_incomings(const struct sm6_function *function,
|
|
- struct sm6_parser *sm6)
|
|
-{
|
|
- const struct sm6_block *block;
|
|
- size_t i, j, block_idx;
|
|
+ ins = &function->instructions.elements[function->instructions.count++];
|
|
|
|
- for (block_idx = 0; block_idx < function->block_count; ++block_idx)
|
|
+ vsir_instruction_init(ins, &dxil->p.location, VSIR_OP_LABEL);
|
|
+ if (!(src_param = instruction_src_params_alloc(ins, 1, dxil)))
|
|
{
|
|
- block = function->blocks[block_idx];
|
|
-
|
|
- for (i = 0; i < block->phi_count; ++i)
|
|
- {
|
|
- struct sm6_phi *phi = &block->phi[i];
|
|
- const struct sm6_value *src;
|
|
-
|
|
- for (j = 0; j < phi->incoming_count; ++j)
|
|
- {
|
|
- src = &sm6->values[phi->incoming[j].reg.idx[0].offset];
|
|
- if (!sm6_value_is_constant(src) && !sm6_value_is_undef(src) && !sm6_value_is_ssa(src))
|
|
- {
|
|
- FIXME("PHI incoming value is not a constant or SSA register.\n");
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
- "A PHI incoming value is not a constant or SSA register.");
|
|
- return VKD3D_ERROR_INVALID_SHADER;
|
|
- }
|
|
- if (src->type != phi->value.type)
|
|
- {
|
|
- WARN("Type mismatch.\n");
|
|
- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
|
|
- "The type of a phi incoming value does not match the result type.");
|
|
- }
|
|
- vsir_register_from_dxil_value(&phi->incoming[j].reg, src, 0, sm6);
|
|
- }
|
|
- }
|
|
+ vkd3d_shader_instruction_make_nop(ins);
|
|
+ return;
|
|
}
|
|
-
|
|
- return VKD3D_OK;
|
|
+ vsir_src_operand_init_label(src_param, label_id);
|
|
}
|
|
|
|
-static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const struct dxil_block *block,
|
|
- struct sm6_function *function)
|
|
+static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6,
|
|
+ const struct dxil_block *block, struct sm6_function *function)
|
|
{
|
|
struct vsir_program *program = sm6->program;
|
|
struct vkd3d_shader_instruction *ins;
|
|
@@ -8305,7 +8519,7 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
const struct dxil_record *record;
|
|
const struct sm6_type *fwd_type;
|
|
bool ret_found, is_terminator;
|
|
- struct sm6_block *code_block;
|
|
+ bool emitted_label = false;
|
|
struct sm6_value *dst;
|
|
|
|
if (!(function->declaration = sm6_parser_next_function_definition(sm6)))
|
|
@@ -8333,45 +8547,41 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
return VKD3D_ERROR_INVALID_SHADER;
|
|
}
|
|
|
|
- if (!sm6_function_blocks_reserve(function, block_count))
|
|
- return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
-
|
|
- /* Pre-allocate all blocks to simplify instruction parsing. */
|
|
- for (i = 0; i < block_count; ++i)
|
|
- {
|
|
- if (!sm6_function_create_block(function))
|
|
- {
|
|
- ERR("Failed to allocate code block.\n");
|
|
- return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
- }
|
|
- }
|
|
function->block_count = block_count;
|
|
- code_block = function->blocks[0];
|
|
|
|
sm6->cur_max_value = function->value_count;
|
|
|
|
for (i = 1, block_idx = 0, ret_found = false; i < block->record_count; ++i)
|
|
{
|
|
+ struct function_emission_state state = {0};
|
|
+
|
|
sm6->p.location.column = i;
|
|
|
|
- if (!code_block)
|
|
+ if (block_idx >= function->block_count)
|
|
{
|
|
- WARN("Invalid block count %zu.\n", function->block_count);
|
|
vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_OPERAND,
|
|
"Invalid block count %zu.", function->block_count);
|
|
return VKD3D_ERROR_INVALID_SHADER;
|
|
}
|
|
|
|
/* Some instructions can emit >1 IR instruction, so extra may be used. */
|
|
- if (!vkd3d_array_reserve((void **)&code_block->instructions, &code_block->instruction_capacity,
|
|
- code_block->instruction_count + MAX_IR_INSTRUCTIONS_PER_DXIL_INSTRUCTION,
|
|
- sizeof(*code_block->instructions)))
|
|
+ if (!vkd3d_array_reserve((void **)&function->instructions.elements, &function->instructions.capacity,
|
|
+ function->instructions.count + !emitted_label + MAX_IR_INSTRUCTIONS_PER_DXIL_INSTRUCTION,
|
|
+ sizeof(*function->instructions.elements)))
|
|
{
|
|
ERR("Failed to allocate instructions.\n");
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
}
|
|
|
|
- ins = &code_block->instructions[code_block->instruction_count];
|
|
+ if (!emitted_label)
|
|
+ {
|
|
+ /* Label id is 1-based. Do not emit a label until it is known that
|
|
+ * instructions will follow. */
|
|
+ sm6_function_emit_label(function, block_idx + 1, sm6);
|
|
+ emitted_label = true;
|
|
+ }
|
|
+
|
|
+ ins = &function->instructions.elements[function->instructions.count];
|
|
ins->opcode = VSIR_OP_INVALID;
|
|
|
|
dst = sm6_parser_get_current_value(sm6);
|
|
@@ -8380,33 +8590,29 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
dst->is_back_ref = true;
|
|
is_terminator = false;
|
|
|
|
+ state.function = function;
|
|
+ state.record = block->records[i];
|
|
+ state.ins = ins;
|
|
+
|
|
record = block->records[i];
|
|
switch (record->code)
|
|
{
|
|
case FUNC_CODE_INST_ALLOCA:
|
|
- sm6_parser_emit_alloca(sm6, record, ins, dst);
|
|
+ sm6_parser_emit_alloca(sm6, &state);
|
|
break;
|
|
case FUNC_CODE_INST_ATOMICRMW:
|
|
- {
|
|
- struct function_emission_state state = {code_block, ins};
|
|
- sm6_parser_emit_atomicrmw(sm6, record, &state, dst);
|
|
- program->temp_count = max(program->temp_count, state.temp_idx);
|
|
+ sm6_parser_emit_atomicrmw(sm6, &state);
|
|
break;
|
|
- }
|
|
case FUNC_CODE_INST_BINOP:
|
|
- sm6_parser_emit_binop(sm6, record, code_block, ins, dst);
|
|
+ sm6_parser_emit_binop(sm6, &state);
|
|
break;
|
|
case FUNC_CODE_INST_BR:
|
|
- sm6_parser_emit_br(sm6, record, function, code_block, ins);
|
|
+ sm6_parser_emit_br(sm6, &state);
|
|
is_terminator = true;
|
|
break;
|
|
case FUNC_CODE_INST_CALL:
|
|
- {
|
|
- struct function_emission_state state = {code_block, ins};
|
|
- sm6_parser_emit_call(sm6, record, &state, dst);
|
|
- program->temp_count = max(program->temp_count, state.temp_idx);
|
|
+ sm6_parser_emit_call(sm6, &state);
|
|
break;
|
|
- }
|
|
case FUNC_CODE_INST_CAST:
|
|
sm6_parser_emit_cast(sm6, record, ins, dst);
|
|
break;
|
|
@@ -8426,10 +8632,10 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
sm6_parser_emit_load(sm6, record, ins, dst);
|
|
break;
|
|
case FUNC_CODE_INST_PHI:
|
|
- sm6_parser_emit_phi(sm6, record, function, code_block, ins, dst);
|
|
+ sm6_parser_emit_phi(sm6, record, function, ins, dst);
|
|
break;
|
|
case FUNC_CODE_INST_RET:
|
|
- sm6_parser_emit_ret(sm6, record, code_block, ins);
|
|
+ sm6_parser_emit_ret(sm6, record, ins);
|
|
is_terminator = true;
|
|
ret_found = true;
|
|
break;
|
|
@@ -8437,7 +8643,7 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
sm6_parser_emit_store(sm6, record, ins, dst);
|
|
break;
|
|
case FUNC_CODE_INST_SWITCH:
|
|
- sm6_parser_emit_switch(sm6, record, function, code_block, ins);
|
|
+ sm6_parser_emit_switch(sm6, record, function, ins);
|
|
is_terminator = true;
|
|
break;
|
|
case FUNC_CODE_INST_VSELECT:
|
|
@@ -8448,23 +8654,22 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
return VKD3D_ERROR_INVALID_SHADER;
|
|
}
|
|
|
|
+ program->temp_count = max(program->temp_count, state.temp_idx);
|
|
+
|
|
if (sm6->p.status < 0)
|
|
return sm6->p.status;
|
|
|
|
if (record->attachment)
|
|
metadata_attachment_record_apply(record->attachment, record->code, ins, dst, sm6);
|
|
|
|
- /* This is specific for PHI nodes, but must happen after attachments have been applied. */
|
|
- if (record->code == FUNC_CODE_INST_PHI)
|
|
- code_block->phi[code_block->phi_count - 1].value = *dst;
|
|
+ if (!state.pushed_instruction)
|
|
+ function->instructions.count += ins->opcode != VSIR_OP_NOP;
|
|
|
|
if (is_terminator)
|
|
{
|
|
++block_idx;
|
|
- code_block = (block_idx < function->block_count) ? function->blocks[block_idx] : NULL;
|
|
+ emitted_label = false;
|
|
}
|
|
- if (code_block)
|
|
- code_block->instruction_count += ins->opcode != VSIR_OP_NOP;
|
|
|
|
if (dst->type && fwd_type && dst->type != fwd_type)
|
|
{
|
|
@@ -8482,148 +8687,163 @@ static enum vkd3d_result sm6_parser_function_init(struct sm6_parser *sm6, const
|
|
return VKD3D_ERROR_INVALID_SHADER;
|
|
}
|
|
|
|
- return sm6_function_resolve_phi_incomings(function, sm6);
|
|
+ return VKD3D_OK;
|
|
}
|
|
|
|
-static void sm6_block_emit_terminator(const struct sm6_block *block, struct sm6_parser *sm6)
|
|
+static void sm6_parser_init_parameter_attributes(struct sm6_parser *dxil, const struct dxil_block *block)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_instruction *ins;
|
|
- unsigned int i, count;
|
|
+ size_t i;
|
|
|
|
- switch (block->terminator.type)
|
|
+ if (dxil->parameter_attributes)
|
|
{
|
|
- case TERMINATOR_UNCOND_BR:
|
|
- if (!block->terminator.true_block)
|
|
- return;
|
|
- if (!(ins = sm6_parser_add_instruction(sm6, VSIR_OP_BRANCH)))
|
|
- return;
|
|
- if (!(src_params = instruction_src_params_alloc(ins, 1, sm6)))
|
|
- {
|
|
- vkd3d_shader_instruction_make_nop(ins);
|
|
- return;
|
|
- }
|
|
- vsir_src_param_init_label(&src_params[0], block->terminator.true_block->id);
|
|
- break;
|
|
-
|
|
- case TERMINATOR_COND_BR:
|
|
- if (!block->terminator.true_block || !block->terminator.false_block)
|
|
- return;
|
|
- if (!(ins = sm6_parser_add_instruction(sm6, VSIR_OP_BRANCH)))
|
|
- return;
|
|
- if (!(src_params = instruction_src_params_alloc(ins, 3, sm6)))
|
|
- {
|
|
- vkd3d_shader_instruction_make_nop(ins);
|
|
- return;
|
|
- }
|
|
- src_param_init(&src_params[0]);
|
|
- src_params[0].reg = block->terminator.conditional_reg;
|
|
- vsir_src_param_init_label(&src_params[1], block->terminator.true_block->id);
|
|
- vsir_src_param_init_label(&src_params[2], block->terminator.false_block->id);
|
|
- break;
|
|
-
|
|
- case TERMINATOR_SWITCH:
|
|
- if (!(ins = sm6_parser_add_instruction(sm6, VSIR_OP_SWITCH_MONOLITHIC)))
|
|
- return;
|
|
- if (!(src_params = instruction_src_params_alloc(ins, block->terminator.case_count * 2u + 1, sm6)))
|
|
- {
|
|
- vkd3d_shader_instruction_make_nop(ins);
|
|
- return;
|
|
- }
|
|
- src_param_init(&src_params[0]);
|
|
- src_params[0].reg = block->terminator.conditional_reg;
|
|
- /* TODO: emit the merge block id. */
|
|
- vsir_src_param_init_label(&src_params[2], 0);
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_DUPLICATED_BLOCK,
|
|
+ "Duplicated PARAMATTR block.");
|
|
+ return;
|
|
+ }
|
|
|
|
- for (i = 0, count = 3; i < block->terminator.case_count; ++i)
|
|
- {
|
|
- const struct terminator_case *switch_case;
|
|
- const struct sm6_block *case_block;
|
|
+ dxil->parameter_attribute_count = block->record_count;
|
|
|
|
- switch_case = &block->terminator.cases[i];
|
|
- if (!(case_block = switch_case->block))
|
|
- {
|
|
- VKD3D_ASSERT(sm6->p.status < 0);
|
|
- continue;
|
|
- }
|
|
- if (switch_case->is_default)
|
|
- {
|
|
- vsir_src_param_init_label(&src_params[1], case_block->id);
|
|
- continue;
|
|
- }
|
|
+ if (!(dxil->parameter_attributes = vkd3d_calloc(block->record_count, sizeof(*dxil->parameter_attributes))))
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
+ "Out of memory while allocating the parameter attributes array.");
|
|
+ dxil->parameter_attribute_count = 0;
|
|
+ return;
|
|
+ }
|
|
|
|
- if (src_params[0].reg.data_type == VSIR_DATA_U64)
|
|
- {
|
|
- vsir_src_param_init(&src_params[count], VKD3DSPR_IMMCONST64, VSIR_DATA_U64, 0);
|
|
- src_params[count++].reg.u.immconst_u64[0] = switch_case->value;
|
|
- }
|
|
- else
|
|
- {
|
|
- if (switch_case->value > UINT_MAX)
|
|
- {
|
|
- WARN("Truncating 64-bit constant %"PRIx64".\n", switch_case->value);
- vkd3d_shader_parser_warning(&sm6->p, VKD3D_SHADER_WARNING_DXIL_TYPE_MISMATCH,
- "Truncating 64-bit switch case value %"PRIx64" to 32 bits.", switch_case->value);
- }
- vsir_src_param_init(&src_params[count], VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
- src_params[count++].reg.u.immconst_u32[0] = switch_case->value;
- }
- vsir_src_param_init_label(&src_params[count++], case_block->id);
- }
+ for (i = 0; i < block->record_count; ++i)
+ {
+ struct dxil_parameter_attribute *attribute = &dxil->parameter_attributes[i];
+ struct dxil_record *record = block->records[i];
- break;
+ if (record->code != PARAMATTR_CODE_ENTRY)
+ {
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_NOT_IMPLEMENTED,
+ "PARAMATTR record code %u is not implemented.", record->code);
+ return;
+ }
- case TERMINATOR_RET:
- sm6_parser_add_instruction(sm6, VSIR_OP_RET);
- break;
+ if (!(attribute->groups = vkd3d_calloc(record->operand_count, sizeof(*attribute->groups))))
+ {
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
+ "Out of memory while allocating the groups array.");
+ return;
+ }
- default:
- vkd3d_unreachable();
+ memcpy(attribute->groups, record->operands, record->operand_count * sizeof(*attribute->groups));
+ attribute->group_count = record->operand_count;
}
}
-static void sm6_block_emit_phi(const struct sm6_block *block, struct sm6_parser *sm6)
|
|
+static void sm6_parser_init_attribute_groups(struct sm6_parser *dxil, const struct dxil_block *block)
|
|
{
|
|
- struct vkd3d_shader_instruction *ins;
|
|
- unsigned int i, j, incoming_count;
|
|
- const struct sm6_phi *src_phi;
|
|
+ size_t i, j;
|
|
|
|
- for (i = 0; i < block->phi_count; ++i)
|
|
+ if (dxil->attribute_groups)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_DUPLICATED_BLOCK,
|
|
+ "Duplicated PARAMATTR_GROUP block.");
|
|
+ return;
|
|
+ }
|
|
|
|
- src_phi = &block->phi[i];
|
|
- incoming_count = src_phi->incoming_count;
|
|
+ dxil->attribute_group_count = block->record_count;
|
|
|
|
- if (!(ins = sm6_parser_add_instruction(sm6, VSIR_OP_PHI)))
|
|
- return;
|
|
- if (!(src_params = instruction_src_params_alloc(ins, incoming_count * 2u, sm6)))
|
|
- {
|
|
- vkd3d_shader_instruction_make_nop(ins);
|
|
- return;
|
|
- }
|
|
- if (!(dst_param = instruction_dst_params_alloc(ins, 1, sm6)))
|
|
+ if (!(dxil->attribute_groups = vkd3d_calloc(block->record_count, sizeof(*dxil->attribute_groups))))
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
+ "Out of memory while allocating the parameter attribute groups array.");
|
|
+ dxil->attribute_group_count = 0;
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ for (i = 0, j = 0; i < block->record_count; ++i)
|
|
+ {
|
|
+ struct dxil_attribute_group *group = &dxil->attribute_groups[j];
|
|
+ struct dxil_record *record = block->records[i];
|
|
+ bool failed = false;
|
|
+ unsigned int k;
|
|
+
|
|
+ if (record->code != PARAMATTR_GRP_CODE_ENTRY)
|
|
{
|
|
- vkd3d_shader_instruction_make_nop(ins);
|
|
- return;
|
|
+ vkd3d_shader_parser_warning(&dxil->p, VKD3D_SHADER_WARNING_DXIL_IGNORING_RECORD,
|
|
+ "Ignoring PARAMATTR_GROUP record code %u.", record->code);
|
|
+ continue;
|
|
}
|
|
|
|
- for (j = 0; j < incoming_count; ++j)
|
|
+ if (!dxil_record_validate_operand_min_count(record, 2, dxil))
|
|
+ continue;
|
|
+
|
|
+ if (record->operands[0] > UINT_MAX || record->operands[0] == 0)
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_CONSTANT,
|
|
+ "PARAMATTR_GROUP group id %"PRIu64" is invalid.", record->operands[0]);
|
|
+
|
|
+ if (record->operands[1] > UINT_MAX)
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_CONSTANT,
|
|
+ "PARAMATTR_GROUP parameter index %"PRIu64" is invalid.", record->operands[1]);
|
|
+
|
|
+ group->group_id = record->operands[0];
|
|
+ group->parameter_idx = record->operands[1];
|
|
+
|
|
+ for (k = 2; k < record->operand_count && !failed;)
|
|
{
|
|
- const struct sm6_block *incoming_block = src_phi->incoming[j].block;
|
|
- unsigned int index = j * 2;
|
|
+ uint64_t kind = record->operands[k++];
|
|
+ struct dxil_attribute *attribute;
|
|
+
|
|
+ if (!vkd3d_array_reserve((void **)&group->attributes, &group->attribute_capacity,
|
|
+ group->attribute_count + 1, sizeof(*group->attributes)))
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
+ "Out of memory allocating the attribute array.");
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ attribute = &group->attributes[group->attribute_count++];
|
|
+ memset(attribute, 0, sizeof(*attribute));
|
|
+ attribute->kind = kind;
|
|
+
|
|
+ switch (kind)
|
|
+ {
|
|
+ case ATTRIBUTE_WELL_KNOWN:
|
|
+ if (!dxil_record_validate_operand_min_count(record, k + 1, dxil))
|
|
+ {
|
|
+ failed = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ attribute->key.well_known = record->operands[k++];
|
|
+ break;
|
|
+
|
|
+ case ATTRIBUTE_WELL_KNOWN_WITH_INTEGER_VALUE:
|
|
+ if (!dxil_record_validate_operand_min_count(record, k + 2, dxil))
|
|
+ {
|
|
+ failed = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ attribute->key.well_known = record->operands[k++];
|
|
+ attribute->value.numeric = record->operands[k++];
|
|
+ break;
|
|
+
|
|
+ case ATTRIBUTE_STRING:
|
|
+ if (!(attribute->key.string = dxil_record_to_zero_terminated_string(record, &k, dxil)))
|
|
+ failed = true;
|
|
+ break;
|
|
+
|
|
+ case ATTRIBUTE_STRING_WITH_STRING_VALUE:
|
|
+ if (!(attribute->key.string = dxil_record_to_zero_terminated_string(record, &k, dxil))
|
|
+ || !(attribute->value.string = dxil_record_to_zero_terminated_string(record, &k, dxil)))
|
|
+ failed = true;
|
|
+ break;
|
|
|
|
- src_param_init(&src_params[index]);
|
|
- src_params[index].reg = src_phi->incoming[j].reg;
|
|
- if (incoming_block)
|
|
- vsir_src_param_init_label(&src_params[index + 1], incoming_block->id);
|
|
- else
|
|
- VKD3D_ASSERT(sm6->p.status < 0);
|
|
+ default:
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_ATTRIBUTE_KIND,
|
|
+ "Unrecognised PARAMATTR_GROUP attribute kind %"PRIu64".", kind);
|
|
+ failed = true;
|
|
+ break;
|
|
+ }
|
|
}
|
|
|
|
- dst_param_init(dst_param);
|
|
- vsir_register_from_dxil_value(&dst_param->reg, &src_phi->value, 0, sm6);
|
|
+ ++j;
|
|
}
|
|
}
|
|
|
|
@@ -8647,6 +8867,16 @@ static enum vkd3d_result sm6_parser_module_init(struct sm6_parser *sm6, const st
|
|
|
|
switch (block->id)
|
|
{
|
|
+ case PARAMATTR_BLOCK:
|
|
+ sm6_parser_init_parameter_attributes(sm6, block);
|
|
+ if (sm6->p.status < 0)
|
|
+ return sm6->p.status;
|
|
+ break;
|
|
+
|
|
+ case PARAMATTR_GROUP_BLOCK:
|
|
+ sm6_parser_init_attribute_groups(sm6, block);
|
|
+ break;
|
|
+
|
|
case CONSTANTS_BLOCK:
|
|
/* Level 1 (global) constants are already done in sm6_parser_globals_init(). */
|
|
if (level < 2)
|
|
@@ -8668,8 +8898,6 @@ static enum vkd3d_result sm6_parser_module_init(struct sm6_parser *sm6, const st
|
|
|
|
case BLOCKINFO_BLOCK:
|
|
case MODULE_BLOCK:
|
|
- case PARAMATTR_BLOCK:
|
|
- case PARAMATTR_GROUP_BLOCK:
|
|
case VALUE_SYMTAB_BLOCK:
|
|
case METADATA_BLOCK:
|
|
case METADATA_ATTACHMENT_BLOCK:
|
|
@@ -8681,53 +8909,29 @@ static enum vkd3d_result sm6_parser_module_init(struct sm6_parser *sm6, const st
|
|
break;
|
|
}
|
|
|
|
- return VKD3D_OK;
|
|
-}
|
|
-
|
|
-static void sm6_parser_emit_label(struct sm6_parser *sm6, unsigned int label_id)
|
|
-{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
- struct vkd3d_shader_instruction *ins;
|
|
-
|
|
- if (!(ins = sm6_parser_add_instruction(sm6, VSIR_OP_LABEL)))
|
|
- return;
|
|
- if (!(src_param = instruction_src_params_alloc(ins, 1, sm6)))
|
|
- {
|
|
- vkd3d_shader_instruction_make_nop(ins);
|
|
- return;
|
|
- }
|
|
- vsir_src_param_init_label(src_param, label_id);
|
|
+ return sm6->p.status;
|
|
}
|
|
|
|
-static enum vkd3d_result sm6_function_emit_blocks(const struct sm6_function *function, struct sm6_parser *sm6)
|
|
+static enum vkd3d_result sm6_function_emit_instructions(const struct sm6_function *function, struct sm6_parser *dxil)
|
|
{
|
|
- struct vsir_program *program = sm6->program;
|
|
+ struct vsir_program *program = dxil->program;
|
|
struct vkd3d_shader_instruction *ins;
|
|
- unsigned int i, j;
|
|
+ unsigned int i;
|
|
|
|
program->block_count = max(program->block_count, function->block_count);
|
|
|
|
- for (i = 0; i < function->block_count; ++i)
|
|
+ for (i = 0; i < function->instructions.count; ++i)
|
|
{
|
|
- const struct sm6_block *block = function->blocks[i];
|
|
-
|
|
- sm6_parser_emit_label(sm6, block->id);
|
|
- sm6_block_emit_phi(block, sm6);
|
|
-
|
|
- for (j = 0; j < block->instruction_count; ++j)
|
|
+ if (!(ins = vsir_program_append(program)))
|
|
{
|
|
- if (!(ins = vsir_program_append(program)))
|
|
- {
|
|
- vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
- "Out of memory emitting block instructions.");
|
|
- return sm6->p.status;
|
|
- }
|
|
- *ins = block->instructions[j];
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_OUT_OF_MEMORY,
|
|
+ "Out of memory while emitting instructions.");
|
|
+ return dxil->p.status;
|
|
}
|
|
- sm6_block_emit_terminator(block, sm6);
|
|
+ *ins = function->instructions.elements[i];
|
|
}
|
|
|
|
- return sm6->p.status;
|
|
+ return dxil->p.status;
|
|
}
|
|
|
|
static bool sm6_parser_allocate_named_metadata(struct sm6_parser *sm6)
|
|
@@ -9372,7 +9576,7 @@ static void init_resource_declaration(struct vkd3d_shader_resource *resource,
|
|
enum vkd3d_shader_register_type reg_type, enum vsir_data_type data_type,
|
|
unsigned int id, const struct vkd3d_shader_register_range *range)
|
|
{
|
|
- struct vkd3d_shader_dst_param *param = &resource->reg;
|
|
+ struct vsir_dst_operand *param = &resource->reg;
|
|
|
|
param->modifiers = 0;
|
|
param->shift = 0;
|
|
@@ -10706,34 +10910,62 @@ static void sm6_symtab_cleanup(struct sm6_symbol *symbols, size_t count)
|
|
vkd3d_free(symbols);
|
|
}
|
|
|
|
-static void sm6_phi_destroy(struct sm6_phi *phi)
|
|
+static void sm6_functions_cleanup(struct sm6_function *functions, size_t count)
|
|
{
|
|
- vkd3d_free(phi->incoming);
|
|
+ size_t i;
|
|
+
|
|
+ for (i = 0; i < count; ++i)
|
|
+ {
|
|
+ vkd3d_free(functions[i].instructions.elements);
|
|
+ }
|
|
+ vkd3d_free(functions);
|
|
}
|
|
|
|
-static void sm6_block_destroy(struct sm6_block *block)
|
|
+static void sm6_parser_cleanup_parameter_attributes(struct sm6_parser *sm6)
|
|
{
|
|
- unsigned int i;
|
|
+ size_t i;
|
|
|
|
- vkd3d_free(block->instructions);
|
|
- for (i = 0; i < block->phi_count; ++i)
|
|
- sm6_phi_destroy(&block->phi[i]);
|
|
- vkd3d_free(block->phi);
|
|
- vkd3d_free(block->terminator.cases);
|
|
- vkd3d_free(block);
|
|
+ for (i = 0; i < sm6->parameter_attribute_count; ++i)
|
|
+ {
|
|
+ struct dxil_parameter_attribute *attribute = &sm6->parameter_attributes[i];
|
|
+
|
|
+ vkd3d_free(attribute->groups);
|
|
+ }
|
|
+
|
|
+ vkd3d_free(sm6->parameter_attributes);
|
|
}
|
|
|
|
-static void sm6_functions_cleanup(struct sm6_function *functions, size_t count)
|
|
+static void sm6_parser_cleanup_attribute_groups(struct sm6_parser *dxil)
|
|
{
|
|
+ struct dxil_attribute_group *group;
|
|
+ struct dxil_attribute *attribute;
|
|
size_t i, j;
|
|
|
|
- for (i = 0; i < count; ++i)
|
|
+ for (i = 0; i < dxil->attribute_group_count; ++i)
|
|
{
|
|
- for (j = 0; j < functions[i].block_count; ++j)
|
|
- sm6_block_destroy(functions[i].blocks[j]);
|
|
- vkd3d_free(functions[i].blocks);
|
|
+ group = &dxil->attribute_groups[i];
|
|
+ for (j = 0; j < group->attribute_count; ++j)
|
|
+ {
|
|
+ attribute = &group->attributes[j];
|
|
+ switch (attribute->kind)
|
|
+ {
|
|
+ case ATTRIBUTE_WELL_KNOWN:
|
|
+ case ATTRIBUTE_WELL_KNOWN_WITH_INTEGER_VALUE:
|
|
+ break;
|
|
+
|
|
+ case ATTRIBUTE_STRING:
|
|
+ vkd3d_free((void *)attribute->key.string);
|
|
+ break;
|
|
+
|
|
+ case ATTRIBUTE_STRING_WITH_STRING_VALUE:
|
|
+ vkd3d_free((void *)attribute->key.string);
|
|
+ vkd3d_free((void *)attribute->value.string);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
}
|
|
- vkd3d_free(functions);
|
|
+
|
|
+ vkd3d_free(dxil->attribute_groups);
|
|
}
|
|
|
|
static void sm6_parser_cleanup(struct sm6_parser *sm6)
|
|
@@ -10743,11 +10975,69 @@ static void sm6_parser_cleanup(struct sm6_parser *sm6)
|
|
sm6_type_table_cleanup(sm6->types, sm6->type_count);
|
|
sm6_symtab_cleanup(sm6->global_symbols, sm6->global_symbol_count);
|
|
sm6_functions_cleanup(sm6->functions, sm6->function_count);
|
|
+ sm6_parser_cleanup_parameter_attributes(sm6);
|
|
+ sm6_parser_cleanup_attribute_groups(sm6);
|
|
sm6_parser_metadata_cleanup(sm6);
|
|
vkd3d_free(sm6->descriptors);
|
|
vkd3d_free(sm6->values);
|
|
}
|
|
|
|
+static enum vsir_denorm_mode sm6_function_get_denorm_mode(const struct sm6_function *function,
|
|
+ struct sm6_parser *dxil)
|
|
+{
|
|
+ unsigned int attribs_id = function->declaration->u.function.attribs_id;
|
|
+ const struct dxil_parameter_attribute *parameter_attribute;
|
|
+ size_t i, j, k;
|
|
+
|
|
+ if (!attribs_id)
|
|
+ return VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+
|
|
+ if (attribs_id > dxil->parameter_attribute_count)
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_ATTRIBUTE,
|
|
+ "Invalid attribute id %u.", attribs_id);
|
|
+ return VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+ }
|
|
+
|
|
+ parameter_attribute = &dxil->parameter_attributes[attribs_id - 1];
|
|
+
|
|
+ for (i = 0; i < parameter_attribute->group_count; ++i)
|
|
+ {
|
|
+ for (j = 0; j < dxil->attribute_group_count; ++j)
|
|
+ {
|
|
+ struct dxil_attribute_group *attribute_group = &dxil->attribute_groups[j];
|
|
+
|
|
+ if (attribute_group->group_id != parameter_attribute->groups[i]
|
|
+ || attribute_group->parameter_idx != ~0u)
|
|
+ continue;
|
|
+
|
|
+ for (k = 0; k < attribute_group->attribute_count; ++k)
|
|
+ {
|
|
+ struct dxil_attribute *attribute = &attribute_group->attributes[k];
|
|
+
|
|
+ if (attribute->kind != ATTRIBUTE_STRING_WITH_STRING_VALUE
|
|
+ || strcmp(attribute->key.string, "fp32-denorm-mode"))
|
|
+ continue;
|
|
+
|
|
+ if (!strcmp(attribute->value.string, "preserve"))
|
|
+ return VSIR_DENORM_PRESERVE;
|
|
+
|
|
+ if (!strcmp(attribute->value.string, "ftz"))
|
|
+ return VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+
|
|
+ if (!strcmp(attribute->value.string, "any"))
|
|
+ return VSIR_DENORM_ANY;
|
|
+
|
|
+ vkd3d_shader_parser_error(&dxil->p, VKD3D_SHADER_ERROR_DXIL_INVALID_ATTRIBUTE,
|
|
+ "Invalid value for attribute `fp32-denorm-mode'.");
|
|
+ return VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+}
|
|
+
|
|
static struct sm6_function *sm6_parser_get_function(const struct sm6_parser *sm6, const char *name)
|
|
{
|
|
size_t i;
|
|
@@ -11028,11 +11318,15 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, struct vsir_pro
|
|
goto fail;
|
|
}
|
|
|
|
+ program->f32_denorm_mode = sm6_function_get_denorm_mode(fn, sm6);
|
|
+
|
|
if (version.type == VKD3D_SHADER_TYPE_HULL)
|
|
{
|
|
+ enum vsir_denorm_mode cp_denorm_mode;
|
|
+
|
|
sm6_parser_add_instruction(sm6, VSIR_OP_HS_CONTROL_POINT_PHASE);
|
|
|
|
- if ((ret = sm6_function_emit_blocks(fn, sm6)) < 0)
|
|
+ if ((ret = sm6_function_emit_instructions(fn, sm6)) < 0)
|
|
goto fail;
|
|
|
|
if (!(fn = sm6_parser_get_function(sm6, sm6->patch_constant_function)))
|
|
@@ -11045,15 +11339,24 @@ static enum vkd3d_result sm6_parser_init(struct sm6_parser *sm6, struct vsir_pro
|
|
goto fail;
|
|
}
|
|
|
|
+ cp_denorm_mode = sm6_function_get_denorm_mode(fn, sm6);
|
|
+
|
|
+ if (sm6->p.status >= 0 && program->f32_denorm_mode != cp_denorm_mode)
|
|
+ {
|
|
+ vkd3d_shader_parser_error(&sm6->p, VKD3D_SHADER_ERROR_DXIL_INVALID_ATTRIBUTE,
|
|
+ "Patch constant denorm mode %u doesn't match control point denorm mode %u.",
|
|
+ program->f32_denorm_mode, cp_denorm_mode);
|
|
+ }
|
|
+
|
|
sm6_parser_add_instruction(sm6, VSIR_OP_HS_FORK_PHASE);
|
|
- if ((ret = sm6_function_emit_blocks(fn, sm6)) < 0)
|
|
+ if ((ret = sm6_function_emit_instructions(fn, sm6)) < 0)
|
|
goto fail;
|
|
|
|
expected_function_count = 2;
|
|
}
|
|
else
|
|
{
|
|
- if ((ret = sm6_function_emit_blocks(fn, sm6)) < 0)
|
|
+ if ((ret = sm6_function_emit_instructions(fn, sm6)) < 0)
|
|
goto fail;
|
|
expected_function_count = 1;
|
|
}
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/glsl.c b/libs/vkd3d/libs/vkd3d-shader/glsl.c
|
|
index 4d7505d8740..96c64a0e4c4 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/glsl.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/glsl.c
|
|
@@ -38,7 +38,7 @@ struct glsl_src
|
|
|
|
struct glsl_dst
|
|
{
|
|
- const struct vkd3d_shader_dst_param *vsir;
|
|
+ const struct vsir_dst_operand *vsir;
|
|
struct vkd3d_string_buffer *register_name;
|
|
struct vkd3d_string_buffer *mask;
|
|
};
|
|
@@ -67,8 +67,8 @@ struct vkd3d_glsl_generator
|
|
const struct vkd3d_shader_scan_combined_resource_sampler_info *combined_sampler_info;
|
|
};
|
|
|
|
-static void shader_glsl_print_subscript(struct vkd3d_string_buffer *buffer, struct vkd3d_glsl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *rel_addr, unsigned int offset);
|
|
+static void shader_glsl_print_subscript(struct vkd3d_string_buffer *buffer,
|
|
+ struct vkd3d_glsl_generator *gen, const struct vsir_src_operand *rel_addr, unsigned int offset);
|
|
|
|
static void VKD3D_PRINTF_FUNC(3, 4) vkd3d_glsl_compiler_error(
|
|
struct vkd3d_glsl_generator *generator,
|
|
@@ -389,7 +389,7 @@ static void shader_glsl_print_bitcast(struct vkd3d_string_buffer *dst, struct vk
|
|
}
|
|
|
|
static void shader_glsl_print_src(struct vkd3d_string_buffer *buffer, struct vkd3d_glsl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *vsir_src, uint32_t mask, enum vsir_data_type data_type)
|
|
+ const struct vsir_src_operand *vsir_src, uint32_t mask, enum vsir_data_type data_type)
|
|
{
|
|
const struct vkd3d_shader_register *reg = &vsir_src->reg;
|
|
struct vkd3d_string_buffer *register_name;
|
|
@@ -418,7 +418,7 @@ static void shader_glsl_print_src(struct vkd3d_string_buffer *buffer, struct vkd
|
|
}
|
|
|
|
static void glsl_src_init(struct glsl_src *glsl_src, struct vkd3d_glsl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *vsir_src, uint32_t mask)
|
|
+ const struct vsir_src_operand *vsir_src, uint32_t mask)
|
|
{
|
|
glsl_src->str = vkd3d_string_buffer_get(&gen->string_buffers);
|
|
shader_glsl_print_src(glsl_src->str, gen, vsir_src, mask, vsir_src->reg.data_type);
|
|
@@ -431,7 +431,7 @@ static void glsl_dst_cleanup(struct glsl_dst *dst, struct vkd3d_string_buffer_ca
|
|
}
|
|
|
|
static uint32_t glsl_dst_init(struct glsl_dst *glsl_dst, struct vkd3d_glsl_generator *gen,
|
|
- const struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_dst_param *vsir_dst)
|
|
+ const struct vkd3d_shader_instruction *ins, const struct vsir_dst_operand *vsir_dst)
|
|
{
|
|
uint32_t write_mask = vsir_dst->write_mask;
|
|
|
|
@@ -452,8 +452,8 @@ static uint32_t glsl_dst_init(struct glsl_dst *glsl_dst, struct vkd3d_glsl_gener
|
|
return write_mask;
|
|
}
|
|
|
|
-static void shader_glsl_print_subscript(struct vkd3d_string_buffer *buffer, struct vkd3d_glsl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *rel_addr, unsigned int offset)
|
|
+static void shader_glsl_print_subscript(struct vkd3d_string_buffer *buffer,
|
|
+ struct vkd3d_glsl_generator *gen, const struct vsir_src_operand *rel_addr, unsigned int offset)
|
|
{
|
|
struct glsl_src r;
|
|
|
|
@@ -862,7 +862,7 @@ static void shader_glsl_ld(struct vkd3d_glsl_generator *gen, const struct vkd3d_
|
|
}
|
|
|
|
static void shader_glsl_print_shadow_coord(struct vkd3d_string_buffer *buffer, struct vkd3d_glsl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *coord, const struct vkd3d_shader_src_param *ref, unsigned int coord_size)
|
|
+ const struct vsir_src_operand *coord, const struct vsir_src_operand *ref, unsigned int coord_size)
|
|
{
|
|
uint32_t coord_mask = vkd3d_write_mask_from_component_count(coord_size);
|
|
|
|
@@ -896,9 +896,9 @@ static void shader_glsl_sample(struct vkd3d_glsl_generator *gen, const struct vk
|
|
{
|
|
bool shadow_sampler, array, bias, dynamic_offset, gather, grad, lod, lod_zero, offset, shadow;
|
|
const struct glsl_resource_type_info *resource_type_info;
|
|
- const struct vkd3d_shader_src_param *resource, *sampler;
|
|
unsigned int resource_id, resource_idx, resource_space;
|
|
unsigned int sampler_id, sampler_idx, sampler_space;
|
|
+ const struct vsir_src_operand *resource, *sampler;
|
|
const struct vkd3d_shader_descriptor_info1 *d;
|
|
enum vkd3d_shader_resource_type resource_type;
|
|
unsigned int component_idx, coord_size;
|
|
@@ -2481,8 +2481,8 @@ int glsl_compile(struct vsir_program *program, uint64_t config_flags,
|
|
return ret;
|
|
|
|
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
|
|
- VKD3D_ASSERT(program->has_descriptor_info);
|
|
- VKD3D_ASSERT(program->has_no_modifiers);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.has_descriptor_info);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.has_no_modifiers);
|
|
|
|
vkd3d_glsl_generator_init(&generator, program, compile_info,
|
|
combined_sampler_info, message_context);
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.c b/libs/vkd3d/libs/vkd3d-shader/hlsl.c
|
|
index 6bca2e1d1b2..641d25539a2 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.c
|
|
@@ -5296,6 +5296,8 @@ int hlsl_parse(const struct vkd3d_shader_compile_info *compile_info,
|
|
if (!vsir_program_init(program, compile_info, &version, 0, VSIR_CF_STRUCTURED, normalisation_level))
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
+ program->f32_denorm_mode = VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+
|
|
if ((ret = hlsl_ctx_parse(&ctx, &program->source_files, compile_info, profile, message_context)) < 0)
|
|
{
|
|
vsir_program_cleanup(program);
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl.y b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
|
|
index e349029521a..4efa1cd2873 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl.y
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl.y
|
|
@@ -544,16 +544,9 @@ static void check_loop_attributes(struct hlsl_ctx *ctx, const struct parse_attri
|
|
hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX, "Unroll attribute can't be used with 'fastopt' attribute.");
|
|
}
|
|
|
|
-static struct hlsl_default_value evaluate_static_expression(struct hlsl_ctx *ctx,
|
|
- struct hlsl_block *block, struct hlsl_type *dst_type, const struct vkd3d_shader_location *loc)
|
|
+static bool is_static_expression(struct hlsl_block *block)
|
|
{
|
|
- struct hlsl_default_value ret = {0};
|
|
struct hlsl_ir_node *node;
|
|
- struct hlsl_block expr;
|
|
- struct hlsl_src src;
|
|
-
|
|
- if (node_from_block(block)->data_type->class == HLSL_CLASS_ERROR)
|
|
- return ret;
|
|
|
|
LIST_FOR_EACH_ENTRY(node, &block->instrs, struct hlsl_ir_node, entry)
|
|
{
|
|
@@ -582,12 +575,28 @@ static struct hlsl_default_value evaluate_static_expression(struct hlsl_ctx *ctx
|
|
case HLSL_IR_SWITCH:
|
|
case HLSL_IR_STATEBLOCK_CONSTANT:
|
|
case HLSL_IR_SYNC:
|
|
- hlsl_error(ctx, &node->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
|
|
- "Expected literal expression.");
|
|
- break;
|
|
+ return false;
|
|
}
|
|
}
|
|
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static struct hlsl_default_value evaluate_static_expression(struct hlsl_ctx *ctx,
|
|
+ struct hlsl_block *block, struct hlsl_type *dst_type, const struct vkd3d_shader_location *loc)
|
|
+{
|
|
+ struct hlsl_default_value ret = {0};
|
|
+ struct hlsl_ir_node *node;
|
|
+ struct hlsl_block expr;
|
|
+ struct hlsl_src src;
|
|
+
|
|
+ if (node_from_block(block)->data_type->class == HLSL_CLASS_ERROR)
|
|
+ return ret;
|
|
+
|
|
+ if (!is_static_expression(block))
|
|
+ hlsl_error(ctx, &node_from_block(block)->loc, VKD3D_SHADER_ERROR_HLSL_INVALID_SYNTAX,
|
|
+ "Expected literal expression.");
|
|
+
|
|
if (!hlsl_clone_block(ctx, &expr, &ctx->static_initializers))
|
|
return ret;
|
|
hlsl_block_add_block(&expr, block);
|
|
@@ -2670,7 +2679,7 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var
|
|
|
|
if (v->initializer.args_count)
|
|
{
|
|
- bool is_default_values_initializer;
|
|
+ bool is_default_values_initializer, static_initialization;
|
|
|
|
is_default_values_initializer = (ctx->cur_buffer != ctx->globals_buffer)
|
|
|| (var->storage_modifiers & HLSL_STORAGE_UNIFORM)
|
|
@@ -2680,6 +2689,10 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var
|
|
if (hlsl_type_is_shader(type))
|
|
is_default_values_initializer = false;
|
|
|
|
+ static_initialization = var->storage_modifiers & HLSL_STORAGE_STATIC
|
|
+ || (var->data_type->modifiers & HLSL_MODIFIER_CONST
|
|
+ && is_static_expression(v->initializer.instrs));
|
|
+
|
|
if (is_default_values_initializer)
|
|
{
|
|
/* Default values might have been allocated already for another variable of the same name,
|
|
@@ -2708,7 +2721,7 @@ static struct hlsl_block *initialize_vars(struct hlsl_ctx *ctx, struct list *var
|
|
{
|
|
hlsl_dump_var_default_values(var);
|
|
}
|
|
- else if (var->storage_modifiers & HLSL_STORAGE_STATIC)
|
|
+ else if (static_initialization)
|
|
{
|
|
hlsl_block_add_block(&ctx->static_initializers, v->initializer.instrs);
|
|
}
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
|
|
index f1368b151aa..55d7f1f7c55 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/hlsl_codegen.c
|
|
@@ -1413,24 +1413,49 @@ static struct hlsl_ir_node *lower_matrix_swizzles(struct hlsl_ctx *ctx,
|
|
return hlsl_block_add_simple_load(ctx, block, var, &instr->loc);
|
|
}
|
|
|
|
-/* hlsl_ir_index nodes are a parse-time construct used to represent array indexing and struct
|
|
- * record access before knowing if they will be used in the lhs of an assignment --in which case
|
|
- * they are lowered into a deref-- or as the load of an element within a larger value.
|
|
- * For the latter case, this pass takes care of lowering hlsl_ir_indexes into individual
|
|
- * hlsl_ir_loads, or individual hlsl_ir_resource_loads, in case the indexing is a
|
|
- * resource access. */
|
|
-static struct hlsl_ir_node *lower_index_loads(struct hlsl_ctx *ctx,
|
|
- struct hlsl_ir_node *instr, struct hlsl_block *block)
|
|
+/* Usually when INDEX nodes are constructed, it's a direct variable load
|
|
+ * followed by the INDEX. As described below in lower_index_load(), we know in
|
|
+ * that case that the variable in question is unmodified and we can convert the
|
|
+ * INDEX to a LOAD of the same variable instead of copying it to a temp.
|
|
+ * This function is an unsophisticated heuristic meant to detect this case.
|
|
+ *
|
|
+ * For various reasons there may be CONSTANT or EXPR instructions between the
|
|
+ * two, so we have to search until we find the source node. */
|
|
+static bool is_indexed_value_known_unmodified(const struct hlsl_block *block, const struct hlsl_ir_index *index)
|
|
+{
|
|
+ const struct list *entry = &index->node.entry;
|
|
+
|
|
+ while ((entry = list_prev(&block->instrs, entry)))
|
|
+ {
|
|
+ const struct hlsl_ir_node *instr = LIST_ENTRY(entry, struct hlsl_ir_node, entry);
|
|
+
|
|
+ if (instr == index->val.node)
|
|
+ return true;
|
|
+
|
|
+ switch (instr->type)
|
|
+ {
|
|
+ case HLSL_IR_CONSTANT:
|
|
+ case HLSL_IR_EXPR:
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static struct hlsl_ir_node *lower_index_load(struct hlsl_ctx *ctx, struct hlsl_ir_index *index,
|
|
+ struct hlsl_block *block, struct hlsl_block *containing_block)
|
|
{
|
|
+ struct hlsl_ir_node *instr = &index->node;
|
|
+ const struct hlsl_deref *deref;
|
|
struct hlsl_deref var_deref;
|
|
- struct hlsl_ir_index *index;
|
|
struct hlsl_ir_load *load;
|
|
struct hlsl_ir_node *val;
|
|
struct hlsl_ir_var *var;
|
|
|
|
- if (instr->type != HLSL_IR_INDEX)
|
|
- return NULL;
|
|
- index = hlsl_ir_index(instr);
|
|
val = index->val.node;
|
|
|
|
if (hlsl_index_is_resource_access(index))
|
|
@@ -1519,11 +1544,46 @@ static struct hlsl_ir_node *lower_index_loads(struct hlsl_ctx *ctx,
|
|
}
|
|
}
|
|
|
|
- if (!(var = hlsl_new_synthetic_var(ctx, "index-val", val->data_type, &instr->loc)))
|
|
- return NULL;
|
|
- hlsl_init_simple_deref_from_var(&var_deref, var);
|
|
+ /* Indexed values don't have to be variable loads, but a LOAD must be of a
|
|
+ * variable, so we may need to copy the indexed value to a synthetic
|
|
+ * variable first.
|
|
+ * Even if an INDEX is of a variable load, due to the structure of our IR,
|
|
+ * it's legal for that variable to have been modified between the LOAD and
|
|
+ * the INDEX. For example, we can have a sequence like:
|
|
+ *
|
|
+ * 2: x
|
|
+ * 3: x = 1
|
|
+ * 4: @2[...]
|
|
+ *
|
|
+ * Because the defined semantics of the IR are essentially "pass by value",
|
|
+ * we can't just convert @4 into a LOAD of x. We have to copy it into a
|
|
+ * synthetic temp first.
|
|
+ *
|
|
+ * This situation generally doesn't actually happen with the IR that comes
|
|
+ * from parsing, but it can happen in certain cases related to function
|
|
+ * calls.
|
|
+ *
|
|
+ * Always creating an extra copy is fine in theory, since copy propagation
|
|
+ * will later undo it. Some of these variables can be extremely large,
|
|
+ * however, such that we can observe a noticeable speed improvement by
|
|
+ * avoiding the copy in the first place. */
|
|
+
|
|
+ if (val->type == HLSL_IR_LOAD && is_indexed_value_known_unmodified(containing_block, index))
|
|
+ {
|
|
+ /* Note that in a chain of indices only the first will be a LOAD.
|
|
+ * However, because we convert from top to bottom, and replace as we go,
|
|
+ * we should end up catching every index in a chain this way. */
|
|
+ deref = &hlsl_ir_load(val)->src;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if (!(var = hlsl_new_synthetic_var(ctx, "index-val", val->data_type, &instr->loc)))
|
|
+ return NULL;
|
|
+ hlsl_init_simple_deref_from_var(&var_deref, var);
|
|
+ deref = &var_deref;
|
|
|
|
- hlsl_block_add_simple_store(ctx, block, var, val);
|
|
+ hlsl_block_add_simple_store(ctx, block, var, val);
|
|
+ }
|
|
|
|
if (hlsl_index_is_noncontiguous(index))
|
|
{
|
|
@@ -1543,7 +1603,7 @@ static struct hlsl_ir_node *lower_index_loads(struct hlsl_ctx *ctx,
|
|
|
|
c = hlsl_block_add_uint_constant(ctx, block, i, &instr->loc);
|
|
|
|
- if (!(load = hlsl_new_load_index(ctx, &var_deref, c, &instr->loc)))
|
|
+ if (!(load = hlsl_new_load_index(ctx, deref, c, &instr->loc)))
|
|
return NULL;
|
|
hlsl_block_add_instr(block, &load->node);
|
|
|
|
@@ -1557,7 +1617,67 @@ static struct hlsl_ir_node *lower_index_loads(struct hlsl_ctx *ctx,
|
|
return hlsl_block_add_simple_load(ctx, block, var, &instr->loc);
|
|
}
|
|
|
|
- return hlsl_block_add_load_index(ctx, block, &var_deref, index->idx.node, &instr->loc);
|
|
+ return hlsl_block_add_load_index(ctx, block, deref, index->idx.node, &instr->loc);
|
|
+}
|
|
+
|
|
+/* hlsl_ir_index nodes are a parse-time construct used to represent array
|
|
+ * indexing and struct record access before knowing if they will be used in the
|
|
+ * LHS of an assignment—in which case they are lowered into a deref—or as the
|
|
+ * load of an element within a larger value.
|
|
+ * For the latter case, this pass takes care of lowering hlsl_ir_indexes into
|
|
+ * individual hlsl_ir_load or hlsl_ir_resource_load. */
|
|
+void hlsl_lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_block *block)
|
|
+{
|
|
+ struct hlsl_ir_node *instr, *next;
|
|
+
|
|
+ LIST_FOR_EACH_ENTRY_SAFE(instr, next, &block->instrs, struct hlsl_ir_node, entry)
|
|
+ {
|
|
+ switch (instr->type)
|
|
+ {
|
|
+ case HLSL_IR_INDEX:
|
|
+ {
|
|
+ struct hlsl_ir_node *replacement;
|
|
+ struct hlsl_block new_block;
|
|
+
|
|
+ hlsl_block_init(&new_block);
|
|
+ if ((replacement = lower_index_load(ctx, hlsl_ir_index(instr), &new_block, block)))
|
|
+ {
|
|
+ list_move_before(&instr->entry, &new_block.instrs);
|
|
+ hlsl_replace_node(instr, replacement);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ hlsl_block_cleanup(&new_block);
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ case HLSL_IR_IF:
|
|
+ {
|
|
+ struct hlsl_ir_if *iff = hlsl_ir_if(instr);
|
|
+ hlsl_lower_index_loads(ctx, &iff->then_block);
|
|
+ hlsl_lower_index_loads(ctx, &iff->else_block);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ case HLSL_IR_LOOP:
|
|
+ hlsl_lower_index_loads(ctx, &hlsl_ir_loop(instr)->body);
|
|
+ break;
|
|
+
|
|
+ case HLSL_IR_SWITCH:
|
|
+ {
|
|
+ struct hlsl_ir_switch *s = hlsl_ir_switch(instr);
|
|
+ struct hlsl_ir_switch_case *c;
|
|
+
|
|
+ LIST_FOR_EACH_ENTRY(c, &s->cases, struct hlsl_ir_switch_case, entry)
|
|
+ hlsl_lower_index_loads(ctx, &c->body);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
/* Lower casts from vec1 to vecN to swizzles. */
|
|
@@ -6439,8 +6559,9 @@ static void register_deref_usage(struct hlsl_ctx *ctx, const struct hlsl_deref *
|
|
else if (regset == HLSL_REGSET_NUMERIC)
|
|
{
|
|
type = hlsl_deref_get_type(ctx, deref);
|
|
+ VKD3D_ASSERT(type->class <= HLSL_CLASS_VECTOR);
|
|
|
|
- required_bind_count = align(index + type->reg_size[regset], 4) / 4;
|
|
+ required_bind_count = align(index + type->e.numeric.dimx, 4) / 4;
|
|
var->bind_count[regset] = max(var->bind_count[regset], required_bind_count);
|
|
}
|
|
else
|
|
@@ -7753,6 +7874,10 @@ bool hlsl_regset_index_from_deref(struct hlsl_ctx *ctx, const struct hlsl_deref
|
|
*index += 4 * idx;
|
|
break;
|
|
|
|
+ case HLSL_CLASS_VECTOR:
|
|
+ *index += idx;
|
|
+ break;
|
|
+
|
|
default:
|
|
vkd3d_unreachable();
|
|
}
|
|
@@ -8511,11 +8636,6 @@ static void remove_unreachable_code(struct hlsl_ctx *ctx, struct hlsl_block *bod
|
|
}
|
|
}
|
|
|
|
-void hlsl_lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_block *body)
|
|
-{
|
|
- replace_ir(ctx, lower_index_loads, body);
|
|
-}
|
|
-
|
|
static enum hlsl_ir_expr_op invert_comparison_op(enum hlsl_ir_expr_op op)
|
|
{
|
|
switch (op)
|
|
@@ -9082,12 +9202,12 @@ static uint32_t generate_vsir_get_src_swizzle(uint32_t src_writemask, uint32_t d
|
|
return swizzle;
|
|
}
|
|
|
|
-static void sm1_generate_vsir_constant_defs(struct hlsl_ctx *ctx, struct vsir_program *program,
|
|
- struct hlsl_block *block)
|
|
+static void sm1_generate_vsir_constant_defs(struct hlsl_ctx *ctx,
|
|
+ struct vsir_program *program, struct hlsl_block *block)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
unsigned int i, x;
|
|
|
|
for (i = 0; i < ctx->constant_defs.count; ++i)
|
|
@@ -9106,22 +9226,20 @@ static void sm1_generate_vsir_constant_defs(struct hlsl_ctx *ctx, struct vsir_pr
|
|
return;
|
|
}
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_register_init(&dst_param->reg, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
- ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- ins->dst[0].reg.idx[0].offset = constant_reg->index;
|
|
- ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL;
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ dst->reg.idx[0].offset = constant_reg->index;
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_ALL;
|
|
|
|
- src_param = &ins->src[0];
|
|
- vsir_register_init(&src_param->reg, VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
|
|
- src_param->reg.type = VKD3DSPR_IMMCONST;
|
|
- src_param->reg.precision = VKD3D_SHADER_REGISTER_PRECISION_DEFAULT;
|
|
- src_param->reg.non_uniform = false;
|
|
- src_param->reg.data_type = VSIR_DATA_F32;
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src = &ins->src[0];
|
|
+ vsir_src_operand_init(src, VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
for (x = 0; x < 4; ++x)
|
|
- src_param->reg.u.immconst_f32[x] = constant_reg->value.f[x];
|
|
- src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
+ {
|
|
+ src->reg.u.immconst_f32[x] = constant_reg->value.f[x];
|
|
+ }
|
|
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
}
|
|
}
|
|
|
|
@@ -9130,10 +9248,10 @@ static void sm1_generate_vsir_sampler_dcls(struct hlsl_ctx *ctx,
|
|
{
|
|
enum vkd3d_shader_resource_type resource_type;
|
|
struct vkd3d_shader_register_range *range;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_semantic *semantic;
|
|
struct vkd3d_shader_instruction *ins;
|
|
enum hlsl_sampler_dim sampler_dim;
|
|
+ struct vsir_dst_operand *dst;
|
|
struct hlsl_ir_var *var;
|
|
unsigned int i, count;
|
|
|
|
@@ -9184,14 +9302,14 @@ static void sm1_generate_vsir_sampler_dcls(struct hlsl_ctx *ctx,
|
|
semantic = &ins->declaration.semantic;
|
|
semantic->resource_type = resource_type;
|
|
|
|
- dst_param = &semantic->resource.reg;
|
|
- vsir_register_init(&dst_param->reg, VKD3DSPR_COMBINED_SAMPLER, VSIR_DATA_F32, 1);
|
|
- dst_param->reg.dimension = VSIR_DIMENSION_NONE;
|
|
- dst_param->reg.idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].index + i;
|
|
- dst_param->write_mask = 0;
|
|
+ dst = &semantic->resource.reg;
|
|
+ vsir_register_init(&dst->reg, VKD3DSPR_COMBINED_SAMPLER, VSIR_DATA_F32, 1);
|
|
+ dst->reg.dimension = VSIR_DIMENSION_NONE;
|
|
+ dst->reg.idx[0].offset = var->regs[HLSL_REGSET_SAMPLERS].index + i;
|
|
+ dst->write_mask = 0;
|
|
range = &semantic->resource.range;
|
|
range->space = 0;
|
|
- range->first = range->last = dst_param->reg.idx[0].offset;
|
|
+ range->first = range->last = dst->reg.idx[0].offset;
|
|
}
|
|
}
|
|
}
|
|
@@ -9252,13 +9370,13 @@ static struct vkd3d_shader_instruction *generate_vsir_add_program_instruction(st
|
|
return ins;
|
|
}
|
|
|
|
-static void vsir_src_from_hlsl_constant_value(struct vkd3d_shader_src_param *src,
|
|
+static void vsir_src_from_hlsl_constant_value(struct vsir_src_operand *src,
|
|
struct hlsl_ctx *ctx, const struct hlsl_constant_value *value,
|
|
enum vsir_data_type type, unsigned int width, unsigned int map_writemask)
|
|
{
|
|
unsigned int i, j;
|
|
|
|
- vsir_src_param_init(src, VKD3DSPR_IMMCONST, type, 0);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_IMMCONST, type, 0);
|
|
if (width == 1)
|
|
{
|
|
src->reg.u.immconst_u32[0] = value->u[0].u;
|
|
@@ -9276,8 +9394,8 @@ static void vsir_src_from_hlsl_constant_value(struct vkd3d_shader_src_param *src
|
|
}
|
|
}
|
|
|
|
-static void vsir_src_from_hlsl_node(struct vkd3d_shader_src_param *src,
|
|
- struct hlsl_ctx *ctx, const struct hlsl_ir_node *instr, uint32_t map_writemask)
|
|
+static void vsir_src_from_hlsl_node(struct vsir_src_operand *src, struct hlsl_ctx *ctx,
|
|
+ const struct hlsl_ir_node *instr, uint32_t map_writemask)
|
|
{
|
|
struct hlsl_ir_constant *constant;
|
|
|
|
@@ -9297,12 +9415,12 @@ static void vsir_src_from_hlsl_node(struct vkd3d_shader_src_param *src,
|
|
}
|
|
}
|
|
|
|
-static struct vkd3d_shader_src_param *sm4_generate_vsir_new_idx_src(struct hlsl_ctx *ctx,
|
|
+static struct vsir_src_operand *sm4_generate_vsir_new_idx_src(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, const struct hlsl_ir_node *rel_offset)
|
|
{
|
|
- struct vkd3d_shader_src_param *idx_src;
|
|
+ struct vsir_src_operand *idx_src;
|
|
|
|
- if (!(idx_src = vsir_program_get_src_params(program, 1)))
|
|
+ if (!(idx_src = vsir_program_get_src_operands(program, 1)))
|
|
{
|
|
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
return NULL;
|
|
@@ -9526,36 +9644,38 @@ static bool sm4_generate_vsir_reg_from_deref(struct hlsl_ctx *ctx, struct vsir_p
|
|
return true;
|
|
}
|
|
|
|
-static bool sm4_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx, struct vsir_program *program,
|
|
- struct vkd3d_shader_src_param *src_param, const struct hlsl_deref *deref,
|
|
+static bool sm4_generate_vsir_init_src_operand_from_deref(struct hlsl_ctx *ctx,
|
|
+ struct vsir_program *program, struct vsir_src_operand *src, const struct hlsl_deref *deref,
|
|
unsigned int dst_writemask, const struct vkd3d_shader_location *loc)
|
|
{
|
|
uint32_t writemask;
|
|
|
|
- if (!sm4_generate_vsir_reg_from_deref(ctx, program, &src_param->reg, &writemask, deref))
|
|
+ if (!sm4_generate_vsir_reg_from_deref(ctx, program, &src->reg, &writemask, deref))
|
|
return false;
|
|
- if (src_param->reg.dimension != VSIR_DIMENSION_NONE)
|
|
- src_param->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
|
|
+ if (src->reg.dimension != VSIR_DIMENSION_NONE)
|
|
+ src->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
|
|
+
|
|
return true;
|
|
}
|
|
|
|
-static bool sm4_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx, struct vsir_program *program,
|
|
- struct vkd3d_shader_dst_param *dst_param, const struct hlsl_deref *deref,
|
|
+static bool sm4_generate_vsir_init_dst_operand_from_deref(struct hlsl_ctx *ctx,
|
|
+ struct vsir_program *program, struct vsir_dst_operand *dst, const struct hlsl_deref *deref,
|
|
const struct vkd3d_shader_location *loc, unsigned int writemask)
|
|
{
|
|
uint32_t reg_writemask;
|
|
|
|
- if (!sm4_generate_vsir_reg_from_deref(ctx, program, &dst_param->reg, ®_writemask, deref))
|
|
+ if (!sm4_generate_vsir_reg_from_deref(ctx, program, &dst->reg, ®_writemask, deref))
|
|
return false;
|
|
- dst_param->write_mask = hlsl_combine_writemasks(reg_writemask, writemask);
|
|
+ dst->write_mask = hlsl_combine_writemasks(reg_writemask, writemask);
|
|
+
|
|
return true;
|
|
}
|
|
|
|
-static void vsir_dst_from_hlsl_node(struct vkd3d_shader_dst_param *dst,
|
|
+static void vsir_dst_from_hlsl_node(struct vsir_dst_operand *dst,
|
|
struct hlsl_ctx *ctx, const struct hlsl_ir_node *instr)
|
|
{
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
- vsir_dst_param_init(dst, instr->reg.type, vsir_data_type_from_hlsl_instruction(ctx, instr), 1);
|
|
+ vsir_dst_operand_init(dst, instr->reg.type, vsir_data_type_from_hlsl_instruction(ctx, instr), 1);
|
|
dst->reg.idx[0].offset = instr->reg.id;
|
|
dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
dst->write_mask = instr->reg.writemask;
|
|
@@ -9565,8 +9685,8 @@ static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, struct hlsl_ir_constant *constant)
|
|
{
|
|
struct hlsl_ir_node *instr = &constant->node;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
VKD3D_ASSERT(constant->reg.allocated);
|
|
@@ -9574,11 +9694,11 @@ static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx,
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
|
|
return;
|
|
|
|
- src_param = &ins->src[0];
|
|
- vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- src_param->reg.idx[0].offset = constant->reg.id;
|
|
- src_param->swizzle = generate_vsir_get_src_swizzle(constant->reg.writemask, instr->reg.writemask);
|
|
+ src = &ins->src[0];
|
|
+ vsir_src_operand_init(src, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->reg.idx[0].offset = constant->reg.id;
|
|
+ src->swizzle = generate_vsir_get_src_swizzle(constant->reg.writemask, instr->reg.writemask);
|
|
|
|
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
|
|
}
|
|
@@ -9586,9 +9706,9 @@ static void sm1_generate_vsir_instr_constant(struct hlsl_ctx *ctx,
|
|
static void sm4_generate_vsir_rasterizer_sample_count(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, struct hlsl_ir_expr *expr)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct hlsl_ir_node *instr = &expr->node;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_SAMPLE_INFO, 1, 1)))
|
|
return;
|
|
@@ -9596,10 +9716,10 @@ static void sm4_generate_vsir_rasterizer_sample_count(struct hlsl_ctx *ctx,
|
|
|
|
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
|
|
|
|
- src_param = &ins->src[0];
|
|
- vsir_src_param_init(src_param, VKD3DSPR_RASTERIZER, VSIR_DATA_UNUSED, 0);
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- src_param->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
+ src = &ins->src[0];
|
|
+ vsir_src_operand_init(src, VKD3DSPR_RASTERIZER, VSIR_DATA_UNUSED, 0);
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
}
|
|
|
|
/* Translate ops that can be mapped to a single vsir instruction with only one dst register. */
|
|
@@ -9608,10 +9728,10 @@ static void generate_vsir_instr_expr_single_instr_op(struct hlsl_ctx *ctx,
|
|
uint32_t src_mod, uint32_t dst_mod, bool map_src_swizzles)
|
|
{
|
|
struct hlsl_ir_node *instr = &expr->node;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
unsigned int i, src_count = 0;
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
|
|
@@ -9625,18 +9745,17 @@ static void generate_vsir_instr_expr_single_instr_op(struct hlsl_ctx *ctx,
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 1, src_count)))
|
|
return;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_dst_from_hlsl_node(dst_param, ctx, instr);
|
|
- dst_param->modifiers = dst_mod;
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_from_hlsl_node(dst, ctx, instr);
|
|
+ dst->modifiers = dst_mod;
|
|
|
|
for (i = 0; i < src_count; ++i)
|
|
{
|
|
struct hlsl_ir_node *operand = expr->operands[i].node;
|
|
|
|
- src_param = &ins->src[i];
|
|
- vsir_src_from_hlsl_node(src_param, ctx, operand,
|
|
- map_src_swizzles ? dst_param->write_mask : VKD3DSP_WRITEMASK_ALL);
|
|
- src_param->modifiers = src_mod;
|
|
+ src = &ins->src[i];
|
|
+ vsir_src_from_hlsl_node(src, ctx, operand, map_src_swizzles ? dst->write_mask : VKD3DSP_WRITEMASK_ALL);
|
|
+ src->modifiers = src_mod;
|
|
}
|
|
}
|
|
|
|
@@ -9647,9 +9766,9 @@ static void sm1_generate_vsir_instr_expr_per_component_instr_op(struct hlsl_ctx
|
|
{
|
|
struct hlsl_ir_node *operand = expr->operands[0].node;
|
|
struct hlsl_ir_node *instr = &expr->node;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
uint32_t src_swizzle;
|
|
unsigned int i, c;
|
|
|
|
@@ -9664,18 +9783,18 @@ static void sm1_generate_vsir_instr_expr_per_component_instr_op(struct hlsl_ctx
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 1, 1)))
|
|
return;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_register_init(&dst_param->reg, instr->reg.type, VSIR_DATA_F32, 1);
|
|
- dst_param->reg.idx[0].offset = instr->reg.id;
|
|
- dst_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- dst_param->write_mask = 1u << i;
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_operand_init(dst, instr->reg.type, VSIR_DATA_F32, 1);
|
|
+ dst->reg.idx[0].offset = instr->reg.id;
|
|
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ dst->write_mask = 1u << i;
|
|
|
|
- src_param = &ins->src[0];
|
|
- vsir_register_init(&src_param->reg, operand->reg.type, VSIR_DATA_F32, 1);
|
|
- src_param->reg.idx[0].offset = operand->reg.id;
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src = &ins->src[0];
|
|
+ vsir_src_operand_init(src, operand->reg.type, VSIR_DATA_F32, 1);
|
|
+ src->reg.idx[0].offset = operand->reg.id;
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
c = vsir_swizzle_get_component(src_swizzle, i);
|
|
- src_param->swizzle = vsir_swizzle_from_writemask(1u << c);
|
|
+ src->swizzle = vsir_swizzle_from_writemask(1u << c);
|
|
}
|
|
}
|
|
}
|
|
@@ -9685,8 +9804,8 @@ static void sm1_generate_vsir_instr_expr_sincos(struct hlsl_ctx *ctx, struct vsi
|
|
{
|
|
struct hlsl_ir_node *operand = expr->operands[0].node;
|
|
struct hlsl_ir_node *instr = &expr->node;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
unsigned int src_count = 0;
|
|
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
@@ -9700,17 +9819,17 @@ static void sm1_generate_vsir_instr_expr_sincos(struct hlsl_ctx *ctx, struct vsi
|
|
|
|
if (ctx->profile->major_version < 3)
|
|
{
|
|
- src_param = &ins->src[1];
|
|
- vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- src_param->reg.idx[0].offset = ctx->d3dsincosconst1.id;
|
|
- src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
+ src = &ins->src[1];
|
|
+ vsir_src_operand_init(src, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->reg.idx[0].offset = ctx->d3dsincosconst1.id;
|
|
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
|
|
- src_param = &ins->src[2];
|
|
- vsir_register_init(&src_param->reg, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- src_param->reg.idx[0].offset = ctx->d3dsincosconst2.id;
|
|
- src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
+ src = &ins->src[2];
|
|
+ vsir_src_operand_init(src, VKD3DSPR_CONST, VSIR_DATA_F32, 1);
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->reg.idx[0].offset = ctx->d3dsincosconst2.id;
|
|
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
}
|
|
}
|
|
|
|
@@ -9981,9 +10100,8 @@ err:
|
|
return false;
|
|
}
|
|
|
|
-static void sm1_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx,
|
|
- struct vkd3d_shader_dst_param *dst_param, struct hlsl_deref *deref,
|
|
- const struct vkd3d_shader_location *loc, unsigned int writemask)
|
|
+static void sm1_generate_vsir_init_dst_operand_from_deref(struct hlsl_ctx *ctx, struct vsir_dst_operand *dst,
|
|
+ struct hlsl_deref *deref, const struct vkd3d_shader_location *loc, unsigned int writemask)
|
|
{
|
|
enum vkd3d_shader_register_type type = VKD3DSPR_TEMP;
|
|
struct vkd3d_shader_version version;
|
|
@@ -10030,16 +10148,16 @@ static void sm1_generate_vsir_init_dst_param_from_deref(struct hlsl_ctx *ctx,
|
|
|
|
if (type == VKD3DSPR_DEPTHOUT)
|
|
{
|
|
- vsir_register_init(&dst_param->reg, type, VSIR_DATA_F32, 0);
|
|
- dst_param->reg.dimension = VSIR_DIMENSION_SCALAR;
|
|
+ vsir_register_init(&dst->reg, type, VSIR_DATA_F32, 0);
|
|
+ dst->reg.dimension = VSIR_DIMENSION_SCALAR;
|
|
}
|
|
else
|
|
{
|
|
- vsir_register_init(&dst_param->reg, type, VSIR_DATA_F32, 1);
|
|
- dst_param->reg.idx[0].offset = register_index;
|
|
- dst_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ vsir_register_init(&dst->reg, type, VSIR_DATA_F32, 1);
|
|
+ dst->reg.idx[0].offset = register_index;
|
|
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
}
|
|
- dst_param->write_mask = writemask;
|
|
+ dst->write_mask = writemask;
|
|
|
|
if (deref->rel_offset.node)
|
|
hlsl_fixme(ctx, loc, "Translate relative addressing on dst register for vsir.");
|
|
@@ -10049,29 +10167,28 @@ static void sm1_generate_vsir_instr_mova(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, struct hlsl_ir_node *instr)
|
|
{
|
|
enum vkd3d_shader_opcode opcode = hlsl_version_ge(ctx, 2, 0) ? VSIR_OP_MOVA : VSIR_OP_MOV;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 1, 1)))
|
|
return;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_register_init(&dst_param->reg, VKD3DSPR_ADDR, VSIR_DATA_F32, 0);
|
|
- dst_param->write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_register_init(&dst->reg, VKD3DSPR_ADDR, VSIR_DATA_F32, 0);
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_0;
|
|
|
|
VKD3D_ASSERT(instr->data_type->class <= HLSL_CLASS_VECTOR);
|
|
VKD3D_ASSERT(instr->data_type->e.numeric.dimx == 1);
|
|
vsir_src_from_hlsl_node(&ins->src[0], ctx, instr, VKD3DSP_WRITEMASK_ALL);
|
|
}
|
|
|
|
-static struct vkd3d_shader_src_param *sm1_generate_vsir_new_address_src(struct hlsl_ctx *ctx,
|
|
- struct vsir_program *program)
|
|
+static struct vsir_src_operand *sm1_generate_vsir_new_address_src(struct hlsl_ctx *ctx, struct vsir_program *program)
|
|
{
|
|
- struct vkd3d_shader_src_param *idx_src;
|
|
+ struct vsir_src_operand *idx_src;
|
|
|
|
- if (!(idx_src = vsir_program_get_src_params(program, 1)))
|
|
+ if (!(idx_src = vsir_program_get_src_operands(program, 1)))
|
|
{
|
|
ctx->result = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
return NULL;
|
|
@@ -10084,12 +10201,12 @@ static struct vkd3d_shader_src_param *sm1_generate_vsir_new_address_src(struct h
|
|
return idx_src;
|
|
}
|
|
|
|
-static void sm1_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx,
|
|
- struct vsir_program *program, struct vkd3d_shader_src_param *src_param,
|
|
- struct hlsl_deref *deref, uint32_t dst_writemask, const struct vkd3d_shader_location *loc)
|
|
+static void sm1_generate_vsir_init_src_operand_from_deref(struct hlsl_ctx *ctx,
|
|
+ struct vsir_program *program, struct vsir_src_operand *src, struct hlsl_deref *deref,
|
|
+ uint32_t dst_writemask, const struct vkd3d_shader_location *loc)
|
|
{
|
|
enum vkd3d_shader_register_type type = VKD3DSPR_TEMP;
|
|
- struct vkd3d_shader_src_param *src_rel_addr = NULL;
|
|
+ struct vsir_src_operand *src_rel_addr = NULL;
|
|
struct vkd3d_shader_version version;
|
|
uint32_t register_index;
|
|
unsigned int writemask;
|
|
@@ -10157,11 +10274,11 @@ static void sm1_generate_vsir_init_src_param_from_deref(struct hlsl_ctx *ctx,
|
|
writemask = reg.writemask;
|
|
}
|
|
|
|
- vsir_register_init(&src_param->reg, type, VSIR_DATA_F32, 1);
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- src_param->reg.idx[0].offset = register_index;
|
|
- src_param->reg.idx[0].rel_addr = src_rel_addr;
|
|
- src_param->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
|
|
+ vsir_src_operand_init(src, type, VSIR_DATA_F32, 1);
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->reg.idx[0].offset = register_index;
|
|
+ src->reg.idx[0].rel_addr = src_rel_addr;
|
|
+ src->swizzle = generate_vsir_get_src_swizzle(writemask, dst_writemask);
|
|
}
|
|
|
|
static void sm1_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_program *program,
|
|
@@ -10180,7 +10297,7 @@ static void sm1_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
|
|
|
|
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
|
|
|
|
- sm1_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[0],
|
|
+ sm1_generate_vsir_init_src_operand_from_deref(ctx, program, &ins->src[0],
|
|
&load->src, ins->dst[0].write_mask, &ins->location);
|
|
}
|
|
|
|
@@ -10191,7 +10308,6 @@ static void sm1_generate_vsir_instr_resource_load(struct hlsl_ctx *ctx,
|
|
struct hlsl_ir_node *ddx = load->ddx.node;
|
|
struct hlsl_ir_node *ddy = load->ddy.node;
|
|
struct hlsl_ir_node *instr = &load->node;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
enum vkd3d_shader_opcode opcode;
|
|
unsigned int src_count = 2;
|
|
@@ -10235,19 +10351,14 @@ static void sm1_generate_vsir_instr_resource_load(struct hlsl_ctx *ctx,
|
|
|
|
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
|
|
|
|
- src_param = &ins->src[0];
|
|
- vsir_src_from_hlsl_node(src_param, ctx, coords, VKD3DSP_WRITEMASK_ALL);
|
|
-
|
|
- sm1_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[1], &load->resource,
|
|
- VKD3DSP_WRITEMASK_ALL, &ins->location);
|
|
+ vsir_src_from_hlsl_node(&ins->src[0], ctx, coords, VKD3DSP_WRITEMASK_ALL);
|
|
+ sm1_generate_vsir_init_src_operand_from_deref(ctx, program, &ins->src[1],
|
|
+ &load->resource, VKD3DSP_WRITEMASK_ALL, &ins->location);
|
|
|
|
if (load->load_type == HLSL_RESOURCE_SAMPLE_GRAD)
|
|
{
|
|
- src_param = &ins->src[2];
|
|
- vsir_src_from_hlsl_node(src_param, ctx, ddx, VKD3DSP_WRITEMASK_ALL);
|
|
-
|
|
- src_param = &ins->src[3];
|
|
- vsir_src_from_hlsl_node(src_param, ctx, ddy, VKD3DSP_WRITEMASK_ALL);
|
|
+ vsir_src_from_hlsl_node(&ins->src[2], ctx, ddx, VKD3DSP_WRITEMASK_ALL);
|
|
+ vsir_src_from_hlsl_node(&ins->src[3], ctx, ddy, VKD3DSP_WRITEMASK_ALL);
|
|
}
|
|
}
|
|
|
|
@@ -10255,8 +10366,8 @@ static void generate_vsir_instr_swizzle(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, struct hlsl_ir_swizzle *swizzle_instr)
|
|
{
|
|
struct hlsl_ir_node *instr = &swizzle_instr->node, *val = swizzle_instr->val.node;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
uint32_t swizzle;
VKD3D_ASSERT(instr->reg.allocated);
@@ -10270,29 +10381,27 @@ static void generate_vsir_instr_swizzle(struct hlsl_ctx *ctx,
swizzle = hlsl_combine_swizzles(swizzle, swizzle_instr->u.vector, instr->data_type->e.numeric.dimx);
swizzle = hlsl_map_swizzle(swizzle, ins->dst[0].write_mask);
- src_param = &ins->src[0];
+ src = &ins->src[0];
VKD3D_ASSERT(val->type != HLSL_IR_CONSTANT);
- vsir_register_init(&src_param->reg, val->reg.type, vsir_data_type_from_hlsl_instruction(ctx, val), 1);
- src_param->reg.idx[0].offset = val->reg.id;
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
- src_param->swizzle = swizzle;
+ vsir_src_operand_init(src, val->reg.type, vsir_data_type_from_hlsl_instruction(ctx, val), 1);
+ src->reg.idx[0].offset = val->reg.id;
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
+ src->swizzle = swizzle;
}
-static void sm1_generate_vsir_instr_store(struct hlsl_ctx *ctx, struct vsir_program *program,
- struct hlsl_ir_store *store)
+static void sm1_generate_vsir_instr_store(struct hlsl_ctx *ctx,
+ struct vsir_program *program, struct hlsl_ir_store *store)
{
struct hlsl_ir_node *rhs = store->rhs.node;
struct hlsl_ir_node *instr = &store->node;
struct vkd3d_shader_instruction *ins;
- struct vkd3d_shader_src_param *src_param;
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
return;
- sm1_generate_vsir_init_dst_param_from_deref(ctx, &ins->dst[0], &store->lhs, &ins->location, store->writemask);
+ sm1_generate_vsir_init_dst_operand_from_deref(ctx, &ins->dst[0], &store->lhs, &ins->location, store->writemask);
- src_param = &ins->src[0];
- vsir_src_from_hlsl_node(src_param, ctx, rhs, ins->dst[0].write_mask);
+ vsir_src_from_hlsl_node(&ins->src[0], ctx, rhs, ins->dst[0].write_mask);
}
static void sm1_generate_vsir_instr_jump(struct hlsl_ctx *ctx,
@@ -10322,9 +10431,9 @@ static void sm1_generate_vsir_block(struct hlsl_ctx *ctx, struct hlsl_block *blo
static void sm1_generate_vsir_instr_if(struct hlsl_ctx *ctx, struct vsir_program *program, struct hlsl_ir_if *iff)
{
struct hlsl_ir_node *condition = iff->condition.node;
- struct vkd3d_shader_src_param *src_param;
struct hlsl_ir_node *instr = &iff->node;
struct vkd3d_shader_instruction *ins;
+ struct vsir_src_operand *src;
/* Conditional branches should have already been flattened for SM < 2.1. */
VKD3D_ASSERT(hlsl_version_ge(ctx, 2, 1));
@@ -10335,13 +10444,13 @@ static void sm1_generate_vsir_instr_if(struct hlsl_ctx *ctx, struct vsir_program
return;
ins->flags = VKD3D_SHADER_REL_OP_NE;
- src_param = &ins->src[0];
- vsir_src_from_hlsl_node(src_param, ctx, condition, VKD3DSP_WRITEMASK_ALL);
- src_param->modifiers = 0;
+ src = &ins->src[0];
+ vsir_src_from_hlsl_node(src, ctx, condition, VKD3DSP_WRITEMASK_ALL);
+ src->modifiers = 0;
- src_param = &ins->src[1];
- vsir_src_from_hlsl_node(src_param, ctx, condition, VKD3DSP_WRITEMASK_ALL);
- src_param->modifiers = VKD3DSPSM_NEG;
+ src = &ins->src[1];
+ vsir_src_from_hlsl_node(src, ctx, condition, VKD3DSP_WRITEMASK_ALL);
+ src->modifiers = VKD3DSPSM_NEG;
sm1_generate_vsir_block(ctx, &iff->then_block, program);
@@ -10931,10 +11040,10 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
const bool is_primitive = hlsl_type_is_primitive_array(var->data_type);
const bool output = var->is_output_semantic;
enum vkd3d_shader_sysval_semantic semantic;
- struct vkd3d_shader_dst_param *dst_param;
struct vkd3d_shader_instruction *ins;
enum vkd3d_shader_register_type type;
enum vkd3d_shader_opcode opcode;
+ struct vsir_dst_operand *dst;
unsigned int idx = 0;
uint32_t write_mask;
bool has_idx;
@@ -11013,44 +11122,44 @@ static void sm4_generate_vsir_instr_dcl_semantic(struct hlsl_ctx *ctx, struct vs
{
VKD3D_ASSERT(semantic == VKD3D_SHADER_SV_NONE || semantic == VKD3D_SHADER_SV_TARGET
|| version->type == VKD3D_SHADER_TYPE_HULL || type != VKD3DSPR_OUTPUT);
- dst_param = &ins->declaration.dst;
+ dst = &ins->declaration.dst;
}
else if (opcode == VSIR_OP_DCL_INPUT || opcode == VSIR_OP_DCL_INPUT_PS)
{
VKD3D_ASSERT(semantic == VKD3D_SHADER_SV_NONE || is_primitive || version->type == VKD3D_SHADER_TYPE_GEOMETRY);
- dst_param = &ins->declaration.dst;
+ dst = &ins->declaration.dst;
}
else
{
VKD3D_ASSERT(semantic != VKD3D_SHADER_SV_NONE);
ins->declaration.register_semantic.sysval_semantic = vkd3d_siv_from_sysval_indexed(semantic,
var->semantic.index);
- dst_param = &ins->declaration.register_semantic.reg;
+ dst = &ins->declaration.register_semantic.reg;
}
if (is_primitive)
{
VKD3D_ASSERT(has_idx);
- vsir_register_init(&dst_param->reg, type, VSIR_DATA_F32, 2);
- dst_param->reg.idx[0].offset = var->data_type->e.array.elements_count;
- dst_param->reg.idx[1].offset = idx;
+ vsir_register_init(&dst->reg, type, VSIR_DATA_F32, 2);
+ dst->reg.idx[0].offset = var->data_type->e.array.elements_count;
+ dst->reg.idx[1].offset = idx;
}
else if (has_idx)
{
- vsir_register_init(&dst_param->reg, type, VSIR_DATA_F32, 1);
- dst_param->reg.idx[0].offset = idx;
+ vsir_register_init(&dst->reg, type, VSIR_DATA_F32, 1);
+ dst->reg.idx[0].offset = idx;
}
else
{
- vsir_register_init(&dst_param->reg, type, VSIR_DATA_F32, 0);
+ vsir_register_init(&dst->reg, type, VSIR_DATA_F32, 0);
}
- if (shader_sm4_is_scalar_register(&dst_param->reg))
- dst_param->reg.dimension = VSIR_DIMENSION_SCALAR;
+ if (shader_sm4_is_scalar_register(&dst->reg))
+ dst->reg.dimension = VSIR_DIMENSION_SCALAR;
else
- dst_param->reg.dimension = VSIR_DIMENSION_VEC4;
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
- dst_param->write_mask = write_mask;
+ dst->write_mask = write_mask;
if (var->is_input_semantic && version->type == VKD3D_SHADER_TYPE_PIXEL)
ins->flags = get_interpolation_mode(version, var->data_type, var->storage_modifiers);
@@ -11094,20 +11203,20 @@ static void sm4_generate_vsir_cast_from_bool(struct hlsl_ctx *ctx, struct vsir_p
|
|
{
|
|
struct hlsl_ir_node *operand = expr->operands[0].node;
|
|
const struct hlsl_ir_node *instr = &expr->node;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct hlsl_constant_value value = {0};
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_AND, 1, 2)))
|
|
return;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_dst_from_hlsl_node(dst_param, ctx, instr);
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_from_hlsl_node(dst, ctx, instr);
|
|
ins->dst[0].reg.data_type = VSIR_DATA_U32;
|
|
|
|
- vsir_src_from_hlsl_node(&ins->src[0], ctx, operand, dst_param->write_mask);
|
|
+ vsir_src_from_hlsl_node(&ins->src[0], ctx, operand, dst->write_mask);
|
|
|
|
value.u[0].u = bits;
|
|
vsir_src_from_hlsl_constant_value(&ins->src[1], ctx, &value, VSIR_DATA_U32, 1, 0);
|
|
@@ -11224,8 +11333,8 @@ static void sm4_generate_vsir_expr_with_two_destinations(struct hlsl_ctx *ctx, s
|
|
enum vkd3d_shader_opcode opcode, const struct hlsl_ir_expr *expr, unsigned int dst_idx)
|
|
{
|
|
const struct hlsl_ir_node *instr = &expr->node;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst;
|
|
unsigned int i, src_count;
|
|
|
|
VKD3D_ASSERT(instr->reg.allocated);
|
|
@@ -11239,13 +11348,13 @@ static void sm4_generate_vsir_expr_with_two_destinations(struct hlsl_ctx *ctx, s
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, opcode, 2, src_count)))
|
|
return;
|
|
|
|
- dst_param = &ins->dst[dst_idx];
|
|
- vsir_dst_from_hlsl_node(dst_param, ctx, instr);
|
|
+ dst = &ins->dst[dst_idx];
|
|
+ vsir_dst_from_hlsl_node(dst, ctx, instr);
|
|
|
|
- vsir_dst_param_init_null(&ins->dst[1 - dst_idx]);
|
|
+ vsir_dst_operand_init_null(&ins->dst[1 - dst_idx]);
|
|
|
|
for (i = 0; i < src_count; ++i)
|
|
- vsir_src_from_hlsl_node(&ins->src[i], ctx, expr->operands[i].node, dst_param->write_mask);
|
|
+ vsir_src_from_hlsl_node(&ins->src[i], ctx, expr->operands[i].node, dst->write_mask);
|
|
}
|
|
|
|
static void sm4_generate_vsir_rcp_using_div(struct hlsl_ctx *ctx,
|
|
@@ -11253,26 +11362,26 @@ static void sm4_generate_vsir_rcp_using_div(struct hlsl_ctx *ctx,
|
|
{
|
|
struct hlsl_ir_node *operand = expr->operands[0].node;
|
|
const struct hlsl_ir_node *instr = &expr->node;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct hlsl_constant_value value = {0};
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
VKD3D_ASSERT(type_is_float(expr->node.data_type));
|
|
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_DIV, 1, 2)))
|
|
return;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_dst_from_hlsl_node(dst_param, ctx, instr);
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_from_hlsl_node(dst, ctx, instr);
|
|
|
|
value.u[0].f = 1.0f;
|
|
value.u[1].f = 1.0f;
|
|
value.u[2].f = 1.0f;
|
|
value.u[3].f = 1.0f;
|
|
vsir_src_from_hlsl_constant_value(&ins->src[0], ctx, &value,
|
|
- VSIR_DATA_F32, instr->data_type->e.numeric.dimx, dst_param->write_mask);
|
|
+ VSIR_DATA_F32, instr->data_type->e.numeric.dimx, dst->write_mask);
|
|
|
|
- vsir_src_from_hlsl_node(&ins->src[1], ctx, operand, dst_param->write_mask);
|
|
+ vsir_src_from_hlsl_node(&ins->src[1], ctx, operand, dst->write_mask);
|
|
}
|
|
|
|
static bool sm4_generate_vsir_instr_expr(struct hlsl_ctx *ctx,
|
|
@@ -11772,22 +11881,20 @@ static bool sm4_generate_vsir_instr_store(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, struct hlsl_ir_store *store)
|
|
{
|
|
struct hlsl_ir_node *instr = &store->node;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
VKD3D_ASSERT(!store->lhs.var->is_tgsm);
|
|
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
|
|
return false;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- if (!sm4_generate_vsir_init_dst_param_from_deref(ctx, program,
|
|
- dst_param, &store->lhs, &instr->loc, store->writemask))
|
|
+ dst = &ins->dst[0];
|
|
+ if (!sm4_generate_vsir_init_dst_operand_from_deref(ctx, program,
|
|
+ dst, &store->lhs, &instr->loc, store->writemask))
|
|
return false;
|
|
|
|
- src_param = &ins->src[0];
|
|
- vsir_src_from_hlsl_node(src_param, ctx, store->rhs.node, dst_param->write_mask);
|
|
+ vsir_src_from_hlsl_node(&ins->src[0], ctx, store->rhs.node, dst->write_mask);
|
|
|
|
return true;
|
|
}
|
|
@@ -11807,10 +11914,10 @@ static bool sm4_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
|
|
{
|
|
const struct vkd3d_shader_version *version = &program->shader_version;
|
|
const struct hlsl_type *type = load->node.data_type;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct hlsl_ir_node *instr = &load->node;
|
|
struct vkd3d_shader_instruction *ins;
|
|
struct hlsl_constant_value value;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
VKD3D_ASSERT(!load->src.var->is_tgsm);
|
|
VKD3D_ASSERT(hlsl_is_numeric_type(type));
|
|
@@ -11822,30 +11929,30 @@ static bool sm4_generate_vsir_instr_load(struct hlsl_ctx *ctx, struct vsir_progr
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOVC, 1, 3)))
|
|
return false;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_dst_from_hlsl_node(dst_param, ctx, instr);
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_from_hlsl_node(dst, ctx, instr);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
- &ins->src[0], &load->src, dst_param->write_mask, &instr->loc))
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
+ &ins->src[0], &load->src, dst->write_mask, &instr->loc))
|
|
return false;
|
|
|
|
memset(&value, 0xff, sizeof(value));
|
|
vsir_src_from_hlsl_constant_value(&ins->src[1], ctx, &value,
|
|
- VSIR_DATA_U32, type->e.numeric.dimx, dst_param->write_mask);
|
|
+ VSIR_DATA_U32, type->e.numeric.dimx, dst->write_mask);
|
|
memset(&value, 0x00, sizeof(value));
|
|
vsir_src_from_hlsl_constant_value(&ins->src[2], ctx, &value,
|
|
- VSIR_DATA_U32, type->e.numeric.dimx, dst_param->write_mask);
|
|
+ VSIR_DATA_U32, type->e.numeric.dimx, dst->write_mask);
|
|
}
|
|
else
|
|
{
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_MOV, 1, 1)))
|
|
return false;
|
|
|
|
- dst_param = &ins->dst[0];
|
|
- vsir_dst_from_hlsl_node(dst_param, ctx, instr);
|
|
+ dst = &ins->dst[0];
|
|
+ vsir_dst_from_hlsl_node(dst, ctx, instr);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
- &ins->src[0], &load->src, dst_param->write_mask, &instr->loc))
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
+ &ins->src[0], &load->src, dst->write_mask, &instr->loc))
|
|
return false;
|
|
}
|
|
return true;
|
|
@@ -11878,8 +11985,8 @@ static bool sm4_generate_vsir_instr_resource_store(struct hlsl_ctx *ctx,
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &store->node.loc, opcode, 0, 1)))
|
|
return false;
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[0],
|
|
- &store->resource, VKD3DSP_WRITEMASK_ALL, &instr->loc))
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
+ &ins->src[0], &store->resource, VKD3DSP_WRITEMASK_ALL, &instr->loc))
|
|
return false;
|
|
|
|
return true;
|
|
@@ -11908,8 +12015,8 @@ static bool sm4_generate_vsir_instr_resource_store(struct hlsl_ctx *ctx,
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_STORE_RAW, 1, 2)))
|
|
return false;
|
|
|
|
- if (!sm4_generate_vsir_init_dst_param_from_deref(ctx, program, &ins->dst[0],
|
|
- &store->resource, &instr->loc, store->writemask))
|
|
+ if (!sm4_generate_vsir_init_dst_operand_from_deref(ctx, program,
|
|
+ &ins->dst[0], &store->resource, &instr->loc, store->writemask))
|
|
return false;
|
|
}
|
|
else
|
|
@@ -11917,7 +12024,7 @@ static bool sm4_generate_vsir_instr_resource_store(struct hlsl_ctx *ctx,
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &instr->loc, VSIR_OP_STORE_UAV_TYPED, 1, 2)))
|
|
return false;
|
|
|
|
- if (!sm4_generate_vsir_init_dst_param_from_deref(ctx, program,
|
|
+ if (!sm4_generate_vsir_init_dst_operand_from_deref(ctx, program,
|
|
&ins->dst[0], &store->resource, &instr->loc, VKD3DSP_WRITEMASK_ALL))
|
|
return false;
|
|
}
|
|
@@ -12038,7 +12145,7 @@ static bool sm4_generate_vsir_instr_ld(struct hlsl_ctx *ctx,
|
|
|
|
vsir_src_from_hlsl_node(&ins->src[0], ctx, coords, coords_writemask);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
&ins->src[structured ? 2 : 1], resource, ins->dst[0].write_mask, &instr->loc))
|
|
return false;
|
|
|
|
@@ -12123,12 +12230,12 @@ static bool sm4_generate_vsir_instr_sample(struct hlsl_ctx *ctx,
|
|
|
|
vsir_src_from_hlsl_node(&ins->src[0], ctx, coords, VKD3DSP_WRITEMASK_ALL);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[1],
|
|
- resource, ins->dst[0].write_mask, &instr->loc))
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
+ &ins->src[1], resource, ins->dst[0].write_mask, &instr->loc))
|
|
return false;
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program, &ins->src[2],
|
|
- sampler, VKD3DSP_WRITEMASK_ALL, &instr->loc))
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
+ &ins->src[2], sampler, VKD3DSP_WRITEMASK_ALL, &instr->loc))
|
|
return false;
|
|
|
|
if (opcode == VSIR_OP_SAMPLE_LOD || opcode == VSIR_OP_SAMPLE_B)
|
|
@@ -12189,11 +12296,11 @@ static bool sm4_generate_vsir_instr_gather(struct hlsl_ctx *ctx, struct vsir_pro
|
|
else
|
|
sm4_generate_vsir_encode_texel_offset_as_aoffimmi(ins, texel_offset);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
&ins->src[current_arg++], resource, ins->dst[0].write_mask, &instr->loc))
|
|
return false;
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
&ins->src[current_arg], sampler, VKD3DSP_WRITEMASK_ALL, &instr->loc))
|
|
return false;
|
|
ins->src[current_arg].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
@@ -12224,7 +12331,7 @@ static bool sm4_generate_vsir_instr_sample_info(struct hlsl_ctx *ctx,
|
|
|
|
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
&ins->src[0], resource, ins->dst[0].write_mask, &instr->loc))
|
|
return false;
|
|
|
|
@@ -12258,7 +12365,7 @@ static bool sm4_generate_vsir_instr_resinfo(struct hlsl_ctx *ctx,
|
|
|
|
vsir_src_from_hlsl_node(&ins->src[0], ctx, load->lod.node, VKD3DSP_WRITEMASK_ALL);
|
|
|
|
- if (!sm4_generate_vsir_init_src_param_from_deref(ctx, program,
|
|
+ if (!sm4_generate_vsir_init_src_operand_from_deref(ctx, program,
|
|
&ins->src[1], resource, ins->dst[0].write_mask, &instr->loc))
|
|
return false;
|
|
|
|
@@ -12378,9 +12485,9 @@ static bool sm4_generate_vsir_instr_interlocked(struct hlsl_ctx *ctx,
|
|
struct hlsl_ir_node *coords = interlocked->coords.node;
|
|
struct hlsl_ir_node *instr = &interlocked->node;
|
|
bool is_imm = interlocked->node.reg.allocated;
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
enum vkd3d_shader_opcode opcode;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
opcode = is_imm ? imm_opcodes[interlocked->op] : opcodes[interlocked->op];
|
|
|
|
@@ -12403,10 +12510,10 @@ static bool sm4_generate_vsir_instr_interlocked(struct hlsl_ctx *ctx,
|
|
if (is_imm)
|
|
vsir_dst_from_hlsl_node(&ins->dst[0], ctx, instr);
|
|
|
|
- dst_param = is_imm ? &ins->dst[1] : &ins->dst[0];
|
|
- if (!sm4_generate_vsir_init_dst_param_from_deref(ctx, program, dst_param, &interlocked->dst, &instr->loc, 0))
|
|
+ dst = is_imm ? &ins->dst[1] : &ins->dst[0];
|
|
+ if (!sm4_generate_vsir_init_dst_operand_from_deref(ctx, program, dst, &interlocked->dst, &instr->loc, 0))
|
|
return false;
|
|
- dst_param->reg.dimension = VSIR_DIMENSION_NONE;
|
|
+ dst->reg.dimension = VSIR_DIMENSION_NONE;
|
|
|
|
vsir_src_from_hlsl_node(&ins->src[0], ctx, coords, VKD3DSP_WRITEMASK_ALL);
|
|
if (cmp_value)
|
|
@@ -12957,8 +13064,8 @@ static void sm4_generate_vsir_add_dcl_constant_buffer(struct hlsl_ctx *ctx,
|
|
{
|
|
unsigned int array_first = cbuffer->reg.index;
|
|
unsigned int array_last = cbuffer->reg.index; /* FIXME: array end. */
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
|
|
if (!(ins = generate_vsir_add_program_instruction(ctx, program, &cbuffer->loc, VSIR_OP_DCL_CONSTANT_BUFFER, 0, 0)))
|
|
{
|
|
@@ -12967,27 +13074,24 @@ static void sm4_generate_vsir_add_dcl_constant_buffer(struct hlsl_ctx *ctx,
|
|
}
|
|
|
|
ins->declaration.cb.size = align(cbuffer->size, 4) * sizeof(float);
|
|
-
|
|
- src_param = &ins->declaration.cb.src;
|
|
- vsir_src_param_init(src_param, VKD3DSPR_CONSTBUFFER, VSIR_DATA_F32, 0);
|
|
- src_param->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
- src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
-
|
|
ins->declaration.cb.range.space = cbuffer->reg.space;
|
|
ins->declaration.cb.range.first = array_first;
|
|
ins->declaration.cb.range.last = array_last;
|
|
|
|
- src_param->reg.idx[0].offset = cbuffer->reg.id;
|
|
- src_param->reg.idx[1].offset = array_first;
|
|
- src_param->reg.idx[2].offset = array_last;
|
|
- src_param->reg.idx_count = 3;
|
|
+ src = &ins->declaration.cb.src;
|
|
+ vsir_src_operand_init(src, VKD3DSPR_CONSTBUFFER, VSIR_DATA_F32, 3);
|
|
+ src->reg.idx[0].offset = cbuffer->reg.id;
|
|
+ src->reg.idx[1].offset = array_first;
|
|
+ src->reg.idx[2].offset = array_last;
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
}
|
|
|
|
static void sm4_generate_vsir_add_dcl_sampler(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, const struct extern_resource *resource)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_src_operand *src;
|
|
unsigned int i;
|
|
|
|
VKD3D_ASSERT(resource->regset == HLSL_REGSET_SAMPLERS);
|
|
@@ -13010,17 +13114,15 @@ static void sm4_generate_vsir_add_dcl_sampler(struct hlsl_ctx *ctx,
|
|
if (resource->component_type->sampler_dim == HLSL_SAMPLER_DIM_COMPARISON)
|
|
ins->flags |= VKD3DSI_SAMPLER_COMPARISON_MODE;
|
|
|
|
- src_param = &ins->declaration.sampler.src;
|
|
- vsir_src_param_init(src_param, VKD3DSPR_SAMPLER, VSIR_DATA_UNUSED, 0);
|
|
-
|
|
ins->declaration.sampler.range.first = array_first;
|
|
ins->declaration.sampler.range.last = array_last;
|
|
ins->declaration.sampler.range.space = resource->space;
|
|
|
|
- src_param->reg.idx[0].offset = resource->id + i;
|
|
- src_param->reg.idx[1].offset = array_first;
|
|
- src_param->reg.idx[2].offset = array_last;
|
|
- src_param->reg.idx_count = 3;
|
|
+ src = &ins->declaration.sampler.src;
|
|
+ vsir_src_operand_init(src, VKD3DSPR_SAMPLER, VSIR_DATA_UNUSED, 3);
|
|
+ src->reg.idx[0].offset = resource->id + i;
|
|
+ src->reg.idx[1].offset = array_first;
|
|
+ src->reg.idx[2].offset = array_last;
|
|
}
|
|
}
|
|
|
|
@@ -13156,7 +13258,7 @@ static void sm4_generate_vsir_add_dcl_texture(struct hlsl_ctx *ctx,
|
|
else
|
|
vsir_resource = &ins->declaration.semantic.resource;
|
|
|
|
- vsir_dst_param_init(&vsir_resource->reg, uav ? VKD3DSPR_UAV : VKD3DSPR_RESOURCE, VSIR_DATA_UNUSED, 0);
|
|
+ vsir_dst_operand_init(&vsir_resource->reg, uav ? VKD3DSPR_UAV : VKD3DSPR_RESOURCE, VSIR_DATA_UNUSED, 0);
|
|
|
|
if (uav && component_type->e.resource.rasteriser_ordered)
|
|
ins->flags = VKD3DSUF_RASTERISER_ORDERED_VIEW;
|
|
@@ -13206,8 +13308,8 @@ static void sm4_generate_vsir_add_dcl_texture(struct hlsl_ctx *ctx,
|
|
static void sm4_generate_vsir_add_dcl_tgsm(struct hlsl_ctx *ctx,
|
|
struct vsir_program *program, const struct hlsl_ir_var *var)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst_param;
|
|
struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_dst_operand *dst;
|
|
|
|
if (!hlsl_is_numeric_type(var->data_type))
|
|
{
|
|
@@ -13221,11 +13323,11 @@ static void sm4_generate_vsir_add_dcl_tgsm(struct hlsl_ctx *ctx,
|
|
return;
|
|
}
|
|
|
|
- dst_param = &ins->declaration.tgsm_raw.reg;
|
|
+ dst = &ins->declaration.tgsm_raw.reg;
|
|
|
|
- vsir_dst_param_init(dst_param, VKD3DSPR_GROUPSHAREDMEM, VSIR_DATA_F32, 1);
|
|
- dst_param->reg.dimension = VSIR_DIMENSION_NONE;
|
|
- dst_param->reg.idx[0].offset = var->regs[HLSL_REGSET_NUMERIC].id;
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_GROUPSHAREDMEM, VSIR_DATA_F32, 1);
|
|
+ dst->reg.dimension = VSIR_DIMENSION_NONE;
|
|
+ dst->reg.idx[0].offset = var->regs[HLSL_REGSET_NUMERIC].id;
|
|
|
|
ins->declaration.tgsm_raw.byte_count = var->data_type->reg_size[HLSL_REGSET_NUMERIC] * 4;
|
|
ins->declaration.tgsm_raw.zero_init = false;
|
|
@@ -13242,7 +13344,7 @@ static void sm4_generate_vsir_add_dcl_stream(struct hlsl_ctx *ctx,
|
|
return;
|
|
}
|
|
|
|
- vsir_src_param_init(&ins->src[0], VKD3DSPR_STREAM, VSIR_DATA_UNUSED, 1);
|
|
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_STREAM, VSIR_DATA_UNUSED, 1);
|
|
ins->src[0].reg.dimension = VSIR_DIMENSION_NONE;
|
|
ins->src[0].reg.idx[0].offset = var->regs[HLSL_REGSET_STREAM_OUTPUTS].index;
|
|
}
|
|
@@ -13478,7 +13580,7 @@ static void generate_vsir_descriptors(struct hlsl_ctx *ctx, struct vsir_program
|
|
}
|
|
}
|
|
|
|
- program->has_descriptor_info = true;
|
|
+ program->normalisation_flags.has_descriptor_info = true;
|
|
}
|
|
|
|
/* For some reason, for matrices, values from default value initializers end
|
|
@@ -14927,7 +15029,7 @@ static void process_entry_function(struct hlsl_ctx *ctx, struct list *semantic_v
|
|
|
|
replace_ir(ctx, lower_complex_casts, body);
|
|
replace_ir(ctx, lower_matrix_swizzles, body);
|
|
- replace_ir(ctx, lower_index_loads, body);
|
|
+ hlsl_lower_index_loads(ctx, body);
|
|
|
|
replace_ir(ctx, lower_tgsm_loads, body);
|
|
replace_ir(ctx, lower_tgsm_stores, body);
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/ir.c b/libs/vkd3d/libs/vkd3d-shader/ir.c
|
|
index 6a1c5303eb4..0261ba88989 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/ir.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/ir.c
|
|
@@ -524,8 +524,8 @@ bool vsir_program_add_icb(struct vsir_program *program, struct vkd3d_shader_imme
|
|
return true;
|
|
}
|
|
|
|
-static struct vkd3d_shader_src_param *vsir_program_clone_src_params(
|
|
- struct vsir_program *program, const struct vkd3d_shader_src_param *params, size_t count);
|
|
+static struct vsir_src_operand *vsir_program_clone_src_operands(
|
|
+ struct vsir_program *program, const struct vsir_src_operand *operands, size_t count);
|
|
|
|
static bool shader_register_clone_relative_addresses(struct vkd3d_shader_register *reg, struct vsir_program *program)
|
|
{
|
|
@@ -536,49 +536,49 @@ static bool shader_register_clone_relative_addresses(struct vkd3d_shader_registe
|
|
if (!reg->idx[i].rel_addr)
|
|
continue;
|
|
|
|
- if (!(reg->idx[i].rel_addr = vsir_program_clone_src_params(program, reg->idx[i].rel_addr, 1)))
|
|
+ if (!(reg->idx[i].rel_addr = vsir_program_clone_src_operands(program, reg->idx[i].rel_addr, 1)))
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
-static struct vkd3d_shader_dst_param *vsir_program_clone_dst_params(
|
|
- struct vsir_program *program, const struct vkd3d_shader_dst_param *params, size_t count)
|
|
+static struct vsir_dst_operand *vsir_program_clone_dst_operands(
|
|
+ struct vsir_program *program, const struct vsir_dst_operand *operands, size_t count)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst_params;
|
|
+ struct vsir_dst_operand *ret;
|
|
size_t i;
|
|
|
|
- if (!(dst_params = vsir_program_get_dst_params(program, count)))
|
|
+ if (!(ret = vsir_program_get_dst_operands(program, count)))
|
|
return NULL;
|
|
|
|
- memcpy(dst_params, params, count * sizeof(*params));
|
|
+ memcpy(ret, operands, count * sizeof(*operands));
|
|
for (i = 0; i < count; ++i)
|
|
{
|
|
- if (!shader_register_clone_relative_addresses(&dst_params[i].reg, program))
|
|
+ if (!shader_register_clone_relative_addresses(&ret[i].reg, program))
|
|
return NULL;
|
|
}
|
|
|
|
- return dst_params;
|
|
+ return ret;
|
|
}
|
|
|
|
-static struct vkd3d_shader_src_param *vsir_program_clone_src_params(
|
|
- struct vsir_program *program, const struct vkd3d_shader_src_param *params, size_t count)
|
|
+static struct vsir_src_operand *vsir_program_clone_src_operands(
|
|
+ struct vsir_program *program, const struct vsir_src_operand *operands, size_t count)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
+ struct vsir_src_operand *ret;
|
|
size_t i;
|
|
|
|
- if (!(src_params = vsir_program_get_src_params(program, count)))
|
|
+ if (!(ret = vsir_program_get_src_operands(program, count)))
|
|
return NULL;
|
|
|
|
- memcpy(src_params, params, count * sizeof(*params));
|
|
+ memcpy(ret, operands, count * sizeof(*operands));
|
|
for (i = 0; i < count; ++i)
|
|
{
|
|
- if (!shader_register_clone_relative_addresses(&src_params[i].reg, program))
|
|
+ if (!shader_register_clone_relative_addresses(&ret[i].reg, program))
|
|
return NULL;
|
|
}
|
|
|
|
- return src_params;
|
|
+ return ret;
|
|
}
|
|
|
|
static void shader_instruction_array_destroy(struct vkd3d_shader_instruction_array *array)
|
|
@@ -667,8 +667,8 @@ bool vsir_program_init(struct vsir_program *program, const struct vkd3d_shader_c
|
|
|
|
/* Size the parameter initial allocations so they are large enough for most shaders. The
|
|
* code path for chained allocations will be tested if a few shaders need to use it. */
|
|
- shader_param_allocator_init(&program->dst_params, reserve - reserve / 8u, sizeof(struct vkd3d_shader_dst_param));
|
|
- shader_param_allocator_init(&program->src_params, reserve * 2u, sizeof(struct vkd3d_shader_src_param));
|
|
+ shader_param_allocator_init(&program->dst_operands, reserve - reserve / 8u, sizeof(struct vsir_dst_operand));
|
|
+ shader_param_allocator_init(&program->src_operands, reserve * 2u, sizeof(struct vsir_src_operand));
|
|
if (!shader_instruction_array_init(&program->instructions, reserve))
|
|
{
|
|
if (program->free_parameters)
|
|
@@ -696,8 +696,8 @@ void vsir_program_cleanup(struct vsir_program *program)
|
|
shader_signature_cleanup(&program->output_signature);
|
|
shader_signature_cleanup(&program->patch_constant_signature);
|
|
vkd3d_shader_free_scan_descriptor_info1(&program->descriptors);
|
|
- shader_param_allocator_destroy(&program->src_params);
|
|
- shader_param_allocator_destroy(&program->dst_params);
|
|
+ shader_param_allocator_destroy(&program->src_operands);
|
|
+ shader_param_allocator_destroy(&program->dst_operands);
|
|
for (i = 0; i < program->icb_count; ++i)
|
|
{
|
|
vkd3d_free(program->icbs[i]);
|
|
@@ -749,6 +749,21 @@ bool vsir_signature_find_sysval(const struct shader_signature *signature,
|
|
return false;
|
|
}
|
|
|
|
+unsigned int vsir_signature_next_location(const struct shader_signature *signature)
|
|
+{
|
|
+ unsigned int i, max_row;
|
|
+
|
|
+ if (!signature)
|
|
+ return 0;
|
|
+
|
|
+ for (i = 0, max_row = 0; i < signature->element_count; ++i)
|
|
+ {
|
|
+ max_row = max(max_row, signature->elements[i].register_index + signature->elements[i].register_count);
|
|
+ }
|
|
+
|
|
+ return max_row;
|
|
+}
|
|
+
|
|
struct vkd3d_shader_descriptor_info1 *vsir_program_add_descriptor(struct vsir_program *program,
|
|
enum vkd3d_shader_descriptor_type type, unsigned int register_id,
|
|
const struct vkd3d_shader_register_range *range,
|
|
@@ -854,70 +869,70 @@ static inline bool shader_register_is_phase_instance_id(const struct vkd3d_shade
|
|
return reg->type == VKD3DSPR_FORKINSTID || reg->type == VKD3DSPR_JOININSTID;
|
|
}
|
|
|
|
-void vsir_src_param_init(struct vkd3d_shader_src_param *param, enum vkd3d_shader_register_type reg_type,
|
|
+void vsir_src_operand_init(struct vsir_src_operand *src, enum vkd3d_shader_register_type reg_type,
|
|
enum vsir_data_type data_type, unsigned int idx_count)
|
|
{
|
|
- vsir_register_init(¶m->reg, reg_type, data_type, idx_count);
|
|
- param->swizzle = 0;
|
|
- param->modifiers = VKD3DSPSM_NONE;
|
|
+ vsir_register_init(&src->reg, reg_type, data_type, idx_count);
|
|
+ src->swizzle = 0;
|
|
+ src->modifiers = VKD3DSPSM_NONE;
|
|
}
|
|
|
|
-static void src_param_init_const_uint(struct vkd3d_shader_src_param *src, uint32_t value)
|
|
+static void vsir_src_operand_init_const_u32(struct vsir_src_operand *src, uint32_t value)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
|
|
src->reg.u.immconst_u32[0] = value;
|
|
}
|
|
|
|
-static void vsir_src_param_init_io(struct vkd3d_shader_src_param *src,
|
|
+static void vsir_src_operand_init_io(struct vsir_src_operand *src,
|
|
enum vkd3d_shader_register_type reg_type, const struct signature_element *e, unsigned int idx_count)
|
|
{
|
|
- vsir_src_param_init(src, reg_type, vsir_data_type_from_component_type(e->component_type), idx_count);
|
|
+ vsir_src_operand_init(src, reg_type, vsir_data_type_from_component_type(e->component_type), idx_count);
|
|
src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
src->swizzle = vsir_swizzle_from_writemask(e->mask);
|
|
}
|
|
|
|
-void vsir_src_param_init_label(struct vkd3d_shader_src_param *param, unsigned int label_id)
|
|
+void vsir_src_operand_init_label(struct vsir_src_operand *src, unsigned int label_id)
|
|
{
|
|
- vsir_src_param_init(param, VKD3DSPR_LABEL, VSIR_DATA_UNUSED, 1);
|
|
- param->reg.dimension = VSIR_DIMENSION_NONE;
|
|
- param->reg.idx[0].offset = label_id;
|
|
+ vsir_src_operand_init(src, VKD3DSPR_LABEL, VSIR_DATA_UNUSED, 1);
|
|
+ src->reg.dimension = VSIR_DIMENSION_NONE;
|
|
+ src->reg.idx[0].offset = label_id;
|
|
}
|
|
|
|
-static void src_param_init_parameter(struct vkd3d_shader_src_param *src, uint32_t idx, enum vsir_data_type type)
|
|
+static void vsir_src_operand_init_parameter(struct vsir_src_operand *src, uint32_t idx, enum vsir_data_type type)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_PARAMETER, type, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_PARAMETER, type, 1);
|
|
src->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-static void src_param_init_parameter_vec4(struct vkd3d_shader_src_param *src, uint32_t idx, enum vsir_data_type type)
|
|
+static void vsir_src_operand_init_parameter_vec4(struct vsir_src_operand *src, uint32_t idx, enum vsir_data_type type)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_PARAMETER, type, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_PARAMETER, type, 1);
|
|
src->reg.idx[0].offset = idx;
|
|
src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
}
|
|
|
|
-static void vsir_src_param_init_resource(struct vkd3d_shader_src_param *src, unsigned int id, unsigned int idx)
|
|
+static void vsir_src_operand_init_resource(struct vsir_src_operand *src, unsigned int id, unsigned int idx)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_RESOURCE, VSIR_DATA_UNUSED, 2);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_RESOURCE, VSIR_DATA_UNUSED, 2);
|
|
src->reg.idx[0].offset = id;
|
|
src->reg.idx[1].offset = idx;
|
|
src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
}
|
|
|
|
-static void vsir_src_param_init_sampler(struct vkd3d_shader_src_param *src, unsigned int id, unsigned int idx)
|
|
+static void vsir_src_operand_init_sampler(struct vsir_src_operand *src, unsigned int id, unsigned int idx)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_SAMPLER, VSIR_DATA_UNUSED, 2);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_SAMPLER, VSIR_DATA_UNUSED, 2);
|
|
src->reg.idx[0].offset = id;
|
|
src->reg.idx[1].offset = idx;
|
|
src->reg.dimension = VSIR_DIMENSION_NONE;
|
|
}
|
|
|
|
-static void src_param_init_ssa(struct vkd3d_shader_src_param *src, unsigned int idx,
|
|
+static void vsir_src_operand_init_ssa(struct vsir_src_operand *src, unsigned int idx,
|
|
enum vsir_data_type data_type, enum vsir_dimension dimension)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_SSA, data_type, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_SSA, data_type, 1);
|
|
src->reg.idx[0].offset = idx;
|
|
|
|
if (dimension == VSIR_DIMENSION_VEC4)
|
|
@@ -927,81 +942,81 @@ static void src_param_init_ssa(struct vkd3d_shader_src_param *src, unsigned int
|
|
}
|
|
}
|
|
|
|
-static void src_param_init_ssa_scalar(struct vkd3d_shader_src_param *src,
|
|
+static void vsir_src_operand_init_ssa_scalar(struct vsir_src_operand *src,
|
|
unsigned int idx, enum vsir_data_type data_type)
|
|
{
|
|
- src_param_init_ssa(src, idx, data_type, VSIR_DIMENSION_SCALAR);
|
|
+ vsir_src_operand_init_ssa(src, idx, data_type, VSIR_DIMENSION_SCALAR);
|
|
}
|
|
|
|
-static void src_param_init_ssa_bool(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_ssa_bool(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- src_param_init_ssa_scalar(src, idx, VSIR_DATA_BOOL);
|
|
+ vsir_src_operand_init_ssa_scalar(src, idx, VSIR_DATA_BOOL);
|
|
}
|
|
|
|
-static void src_param_init_ssa_float(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_ssa_f32(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- src_param_init_ssa_scalar(src, idx, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init_ssa_scalar(src, idx, VSIR_DATA_F32);
|
|
}
|
|
|
|
-static void src_param_init_ssa_float4(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_ssa_f32v4(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- src_param_init_ssa(src, idx, VSIR_DATA_F32, VSIR_DIMENSION_VEC4);
|
|
+ vsir_src_operand_init_ssa(src, idx, VSIR_DATA_F32, VSIR_DIMENSION_VEC4);
|
|
}
|
|
|
|
-static void src_param_init_temp_bool(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_temp_bool(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_TEMP, VSIR_DATA_BOOL, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_TEMP, VSIR_DATA_BOOL, 1);
|
|
src->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-static void src_param_init_temp_float(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_temp_f32(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
|
|
src->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-static void src_param_init_temp_float4(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_temp_f32v4(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
|
|
src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
src->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-static void src_param_init_temp_uint(struct vkd3d_shader_src_param *src, unsigned int idx)
|
|
+static void vsir_src_operand_init_temp_u32(struct vsir_src_operand *src, unsigned int idx)
|
|
{
|
|
- vsir_src_param_init(src, VKD3DSPR_TEMP, VSIR_DATA_U32, 1);
|
|
+ vsir_src_operand_init(src, VKD3DSPR_TEMP, VSIR_DATA_U32, 1);
|
|
src->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-void vsir_dst_param_init(struct vkd3d_shader_dst_param *param, enum vkd3d_shader_register_type reg_type,
|
|
+void vsir_dst_operand_init(struct vsir_dst_operand *dst, enum vkd3d_shader_register_type reg_type,
|
|
enum vsir_data_type data_type, unsigned int idx_count)
|
|
{
|
|
- vsir_register_init(¶m->reg, reg_type, data_type, idx_count);
|
|
- param->write_mask = VKD3DSP_WRITEMASK_0;
|
|
- param->modifiers = VKD3DSPDM_NONE;
|
|
- param->shift = 0;
|
|
+ vsir_register_init(&dst->reg, reg_type, data_type, idx_count);
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ dst->modifiers = VKD3DSPDM_NONE;
|
|
+ dst->shift = 0;
|
|
}
|
|
|
|
-static void vsir_dst_param_init_io(struct vkd3d_shader_dst_param *dst, enum vkd3d_shader_register_type reg_type,
|
|
+static void vsir_dst_operand_init_io(struct vsir_dst_operand *dst, enum vkd3d_shader_register_type reg_type,
|
|
const struct signature_element *e, unsigned int idx_count)
|
|
{
|
|
- vsir_dst_param_init(dst, reg_type, vsir_data_type_from_component_type(e->component_type), idx_count);
|
|
+ vsir_dst_operand_init(dst, reg_type, vsir_data_type_from_component_type(e->component_type), idx_count);
|
|
dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
dst->write_mask = e->mask;
|
|
}
|
|
|
|
-void vsir_dst_param_init_null(struct vkd3d_shader_dst_param *dst)
|
|
+void vsir_dst_operand_init_null(struct vsir_dst_operand *dst)
|
|
{
|
|
- vsir_dst_param_init(dst, VKD3DSPR_NULL, VSIR_DATA_UNUSED, 0);
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_NULL, VSIR_DATA_UNUSED, 0);
|
|
dst->reg.dimension = VSIR_DIMENSION_NONE;
|
|
dst->write_mask = 0;
|
|
}
|
|
|
|
-static void dst_param_init_ssa(struct vkd3d_shader_dst_param *dst, unsigned int idx,
|
|
+static void vsir_dst_operand_init_ssa(struct vsir_dst_operand *dst, unsigned int idx,
|
|
enum vsir_data_type data_type, enum vsir_dimension dimension)
|
|
{
|
|
- vsir_dst_param_init(dst, VKD3DSPR_SSA, data_type, 1);
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_SSA, data_type, 1);
|
|
dst->reg.idx[0].offset = idx;
|
|
|
|
if (dimension == VSIR_DIMENSION_VEC4)
|
|
@@ -1011,50 +1026,50 @@ static void dst_param_init_ssa(struct vkd3d_shader_dst_param *dst, unsigned int
|
|
}
|
|
}
|
|
|
|
-static void dst_param_init_ssa_scalar(struct vkd3d_shader_dst_param *dst,
|
|
+static void vsir_dst_operand_init_ssa_scalar(struct vsir_dst_operand *dst,
|
|
unsigned int idx, enum vsir_data_type data_type)
|
|
{
|
|
- dst_param_init_ssa(dst, idx, data_type, VSIR_DIMENSION_SCALAR);
|
|
+ vsir_dst_operand_init_ssa(dst, idx, data_type, VSIR_DIMENSION_SCALAR);
|
|
}
|
|
|
|
-static void dst_param_init_ssa_bool(struct vkd3d_shader_dst_param *dst, unsigned int idx)
|
|
+static void vsir_dst_operand_init_ssa_bool(struct vsir_dst_operand *dst, unsigned int idx)
|
|
{
|
|
- dst_param_init_ssa_scalar(dst, idx, VSIR_DATA_BOOL);
|
|
+ vsir_dst_operand_init_ssa_scalar(dst, idx, VSIR_DATA_BOOL);
|
|
}
|
|
|
|
-static void dst_param_init_ssa_float(struct vkd3d_shader_dst_param *dst, unsigned int idx)
|
|
+static void vsir_dst_operand_init_ssa_f32(struct vsir_dst_operand *dst, unsigned int idx)
|
|
{
|
|
- dst_param_init_ssa_scalar(dst, idx, VSIR_DATA_F32);
|
|
+ vsir_dst_operand_init_ssa_scalar(dst, idx, VSIR_DATA_F32);
|
|
}
|
|
|
|
-static void dst_param_init_ssa_float4(struct vkd3d_shader_dst_param *dst, unsigned int idx)
|
|
+static void vsir_dst_operand_init_ssa_f32v4(struct vsir_dst_operand *dst, unsigned int idx)
|
|
{
|
|
- dst_param_init_ssa(dst, idx, VSIR_DATA_F32, VSIR_DIMENSION_VEC4);
|
|
+ vsir_dst_operand_init_ssa(dst, idx, VSIR_DATA_F32, VSIR_DIMENSION_VEC4);
|
|
}
|
|
|
|
-static void dst_param_init_temp_bool(struct vkd3d_shader_dst_param *dst, unsigned int idx)
|
|
+static void vsir_dst_operand_init_temp_bool(struct vsir_dst_operand *dst, unsigned int idx)
|
|
{
|
|
- vsir_dst_param_init(dst, VKD3DSPR_TEMP, VSIR_DATA_BOOL, 1);
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_TEMP, VSIR_DATA_BOOL, 1);
|
|
dst->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-static void dst_param_init_temp_float4(struct vkd3d_shader_dst_param *dst, unsigned int idx)
|
|
+static void vsir_dst_operand_init_temp_f32v4(struct vsir_dst_operand *dst, unsigned int idx)
|
|
{
|
|
- vsir_dst_param_init(dst, VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
|
|
dst->reg.idx[0].offset = idx;
|
|
dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
}
|
|
|
|
-static void dst_param_init_temp_uint(struct vkd3d_shader_dst_param *dst, unsigned int idx)
|
|
+static void vsir_dst_operand_init_temp_u32(struct vsir_dst_operand *dst, unsigned int idx)
|
|
{
|
|
- vsir_dst_param_init(dst, VKD3DSPR_TEMP, VSIR_DATA_U32, 1);
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_TEMP, VSIR_DATA_U32, 1);
|
|
dst->reg.idx[0].offset = idx;
|
|
}
|
|
|
|
-static void dst_param_init_output(struct vkd3d_shader_dst_param *dst,
|
|
+static void vsir_dst_operand_init_output(struct vsir_dst_operand *dst,
|
|
enum vsir_data_type data_type, uint32_t idx, uint32_t write_mask)
|
|
{
|
|
- vsir_dst_param_init(dst, VKD3DSPR_OUTPUT, data_type, 1);
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_OUTPUT, data_type, 1);
|
|
dst->reg.idx[0].offset = idx;
|
|
dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
dst->write_mask = write_mask;
|
|
@@ -1079,13 +1094,13 @@ bool vsir_instruction_init_with_params(struct vsir_program *program,
|
|
ins->dst_count = dst_count;
|
|
ins->src_count = src_count;
|
|
|
|
- if (!(ins->dst = vsir_program_get_dst_params(program, ins->dst_count)))
|
|
+ if (!(ins->dst = vsir_program_get_dst_operands(program, ins->dst_count)))
|
|
{
|
|
ERR("Failed to allocate %u destination parameters.\n", dst_count);
|
|
return false;
|
|
}
|
|
|
|
- if (!(ins->src = vsir_program_get_src_params(program, ins->src_count)))
|
|
+ if (!(ins->src = vsir_program_get_src_operands(program, ins->src_count)))
|
|
{
|
|
ERR("Failed to allocate %u source parameters.\n", src_count);
|
|
return false;
|
|
@@ -1099,15 +1114,15 @@ bool vsir_instruction_init_with_params(struct vsir_program *program,
|
|
static bool vsir_instruction_init_label(struct vkd3d_shader_instruction *ins,
|
|
const struct vkd3d_shader_location *location, unsigned int label_id, struct vsir_program *program)
|
|
{
|
|
- struct vkd3d_shader_src_param *src_param;
|
|
+ struct vsir_src_operand *src;
|
|
|
|
- if (!(src_param = vsir_program_get_src_params(program, 1)))
|
|
+ if (!(src = vsir_program_get_src_operands(program, 1)))
|
|
return false;
|
|
|
|
- vsir_src_param_init_label(src_param, label_id);
|
|
+ vsir_src_operand_init_label(src, label_id);
|
|
|
|
vsir_instruction_init(ins, location, VSIR_OP_LABEL);
|
|
- ins->src = src_param;
|
|
+ ins->src = src;
|
|
ins->src_count = 1;
|
|
|
|
return true;
|
|
@@ -1129,10 +1144,10 @@ static bool vsir_program_iterator_clone_instruction(struct vsir_program *program
|
|
|
|
*dst = *src;
|
|
|
|
- if (dst->dst_count && !(dst->dst = vsir_program_clone_dst_params(program, dst->dst, dst->dst_count)))
|
|
+ if (dst->dst_count && !(dst->dst = vsir_program_clone_dst_operands(program, dst->dst, dst->dst_count)))
|
|
return false;
|
|
|
|
- return !dst->src_count || (dst->src = vsir_program_clone_src_params(program, dst->src, dst->src_count));
|
|
+ return !dst->src_count || (dst->src = vsir_program_clone_src_operands(program, dst->src, dst->src_count));
|
|
}
|
|
|
|
static bool get_opcode_from_rel_op(enum vkd3d_shader_rel_op rel_op,
|
|
@@ -1233,11 +1248,11 @@ static enum vkd3d_result vsir_program_normalize_addr(struct vsir_program *progra
|
|
|
|
for (k = 0; k < ins->src_count; ++k)
|
|
{
|
|
- struct vkd3d_shader_src_param *src = &ins->src[k];
|
|
+ struct vsir_src_operand *src = &ins->src[k];
|
|
|
|
for (r = 0; r < src->reg.idx_count; ++r)
|
|
{
|
|
- struct vkd3d_shader_src_param *rel = src->reg.idx[r].rel_addr;
|
|
+ struct vsir_src_operand *rel = src->reg.idx[r].rel_addr;
|
|
|
|
if (rel && rel->reg.type == VKD3DSPR_ADDR)
|
|
{
|
|
@@ -1308,6 +1323,110 @@ static enum vkd3d_result vsir_program_lower_ifc(struct vsir_program *program,
|
|
return VKD3D_OK;
|
|
}
|
|
|
|
+static enum vkd3d_result vsir_program_lower_lrp(struct vsir_program *program, struct vsir_program_iterator *lrp)
|
|
+{
|
|
+ struct vkd3d_shader_instruction *ins = vsir_program_iterator_current(lrp);
|
|
+ const struct vkd3d_shader_location location = ins->location;
|
|
+ const struct vsir_src_operand *src = ins->src;
|
|
+ const struct vsir_dst_operand *dst = ins->dst;
|
|
+ struct vsir_program_iterator it;
|
|
+ unsigned int neg_id, mad_id;
|
|
+
|
|
+ /* lrp DST, SRC0, SRC1, SRC2
|
|
+ * ->
|
|
+ * neg srNEG, SRC0
|
|
+ * mad srMAD, srNEG, SRC2, SRC2
|
|
+ * mad DST, SRC0, SRC1, srMAD */
|
|
+
|
|
+ if (!(ins = vsir_program_iterator_insert_before(lrp, &it, 2)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_NEG, 1, 1))
|
|
+ goto fail;
|
|
+ neg_id = program->ssa_count++;
|
|
+ vsir_dst_operand_init_ssa(&ins->dst[0], neg_id, src[0].reg.data_type, src[0].reg.dimension);
|
|
+ ins->src[0] = src[0];
|
|
+
|
|
+ ins = vsir_program_iterator_next(&it);
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_MAD, 1, 3))
|
|
+ goto fail;
|
|
+ mad_id = program->ssa_count++;
|
|
+ vsir_dst_operand_init_ssa(&ins->dst[0], mad_id, src[2].reg.data_type, src[2].reg.dimension);
|
|
+ vsir_src_operand_init_ssa(&ins->src[0], neg_id, src[0].reg.data_type, src[0].reg.dimension);
|
|
+ ins->src[1] = src[2];
|
|
+ ins->src[2] = src[2];
|
|
+
|
|
+ ins = vsir_program_iterator_next(&it);
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_MAD, 1, 3))
|
|
+ goto fail;
|
|
+ ins->dst[0] = dst[0];
|
|
+ ins->src[0] = src[0];
|
|
+ ins->src[1] = src[1];
|
|
+ vsir_src_operand_init_ssa(&ins->src[2], mad_id, src[2].reg.data_type, src[2].reg.dimension);
|
|
+
|
|
+ return VKD3D_OK;
|
|
+
|
|
+fail:
|
|
+ vsir_program_iterator_nop_range(&it, lrp, &location);
|
|
+
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+}
|
|
+
|
|
+static enum vkd3d_result vsir_program_lower_nrm(struct vsir_program *program, struct vsir_program_iterator *nrm)
|
|
+{
|
|
+ struct vkd3d_shader_instruction *ins = vsir_program_iterator_current(nrm);
|
|
+ const struct vkd3d_shader_location location = ins->location;
|
|
+ const struct vsir_src_operand *src = ins->src;
|
|
+ const struct vsir_dst_operand *dst = ins->dst;
|
|
+ unsigned int dot_id, rsq_id, mul_id;
|
|
+ struct vsir_program_iterator it;
|
|
+
|
|
+ /* nrm DST, SRC
|
|
+ * ->
|
|
+ * dp3 srDOT, SRC, SRC
|
|
+ * rsq srRSQ, srDOT
|
|
+ * mul srMUL, srRSQ, SRC
|
|
+ * movc DST, srDOT, srMUL, srDOT */
|
|
+
|
|
+ if (!(ins = vsir_program_iterator_insert_before(nrm, &it, 3)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_DP3, 1, 2))
|
|
+ goto fail;
|
|
+ dot_id = program->ssa_count++;
|
|
+ vsir_dst_operand_init_ssa(&ins->dst[0], dot_id, src[0].reg.data_type, VSIR_DIMENSION_SCALAR);
|
|
+ ins->src[0] = src[0];
|
|
+ ins->src[1] = src[0];
|
|
+
|
|
+ ins = vsir_program_iterator_next(&it);
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_RSQ, 1, 1))
|
|
+ goto fail;
|
|
+ rsq_id = program->ssa_count++;
|
|
+ vsir_dst_operand_init_ssa(&ins->dst[0], rsq_id, src[0].reg.data_type, VSIR_DIMENSION_SCALAR);
|
|
+ vsir_src_operand_init_ssa(&ins->src[0], dot_id, src[0].reg.data_type, VSIR_DIMENSION_SCALAR);
|
|
+
|
|
+ ins = vsir_program_iterator_next(&it);
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_MUL, 1, 2))
|
|
+ goto fail;
|
|
+ mul_id = program->ssa_count++;
|
|
+ vsir_dst_operand_init_ssa(&ins->dst[0], mul_id, src[0].reg.data_type, dst[0].reg.dimension);
|
|
+ vsir_src_operand_init_ssa(&ins->src[0], rsq_id, src[0].reg.data_type, VSIR_DIMENSION_SCALAR);
|
|
+ ins->src[1] = src[0];
|
|
+
|
|
+ ins = vsir_program_iterator_next(&it);
|
|
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_MOVC, 1, 3))
|
|
+ goto fail;
|
|
+ ins->dst[0] = dst[0];
|
|
+ vsir_src_operand_init_ssa(&ins->src[0], dot_id, VSIR_DATA_U32, VSIR_DIMENSION_SCALAR);
|
|
+ vsir_src_operand_init_ssa(&ins->src[1], mul_id, src[0].reg.data_type, dst[0].reg.dimension);
|
|
+ vsir_src_operand_init_ssa(&ins->src[2], dot_id, src[0].reg.data_type, VSIR_DIMENSION_SCALAR);
|
|
+
|
|
+ return VKD3D_OK;
|
|
+
|
|
+fail:
|
|
+ vsir_program_iterator_nop_range(&it, nrm, &location);
|
|
+
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+}
|
|
+
|
|
static enum vkd3d_result vsir_program_lower_texkill(struct vsir_program *program,
|
|
struct vsir_program_iterator *it, unsigned int *tmp_idx)
|
|
{
|
|
@@ -1396,7 +1515,7 @@ static enum vkd3d_result vsir_program_lower_precise_mad(struct vsir_program *pro
|
|
struct vsir_program_iterator *it)
|
|
{
|
|
struct vkd3d_shader_instruction *mad, *mul_ins, *add_ins;
|
|
- struct vkd3d_shader_dst_param *mul_dst;
|
|
+ struct vsir_dst_operand *mul_dst;
|
|
|
|
mad = vsir_program_iterator_current(it);
|
|
|
|
@@ -1420,8 +1539,9 @@ static enum vkd3d_result vsir_program_lower_precise_mad(struct vsir_program *pro
|
|
mul_dst = mul_ins->dst;
|
|
*add_ins->dst = *mul_dst;
|
|
|
|
- dst_param_init_ssa(mul_dst, program->ssa_count, mul_ins->src[0].reg.data_type, VSIR_DIMENSION_VEC4);
|
|
- src_param_init_ssa(&add_ins->src[0], program->ssa_count++, mul_ins->src[0].reg.data_type, VSIR_DIMENSION_VEC4);
|
|
+ vsir_dst_operand_init_ssa(mul_dst, program->ssa_count, mul_ins->src[0].reg.data_type, VSIR_DIMENSION_VEC4);
|
|
+ vsir_src_operand_init_ssa(&add_ins->src[0], program->ssa_count++,
|
|
+ mul_ins->src[0].reg.data_type, VSIR_DIMENSION_VEC4);
|
|
|
|
add_ins->src[1] = mul_ins->src[2];
|
|
|
|
@@ -1479,7 +1599,7 @@ static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,
|
|
|
|
mov->src[0] = udiv->src[0];
|
|
src0_id = program->ssa_count++;
|
|
- dst_param_init_ssa(&mov->dst[0], src0_id, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
|
|
+ vsir_dst_operand_init_ssa(&mov->dst[0], src0_id, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
|
|
|
|
mov = vsir_program_iterator_next(it);
|
|
if (!(vsir_instruction_init_with_params(program, mov, &udiv->location, VSIR_OP_MOV, 1, 1)))
|
|
@@ -1487,14 +1607,14 @@ static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,
|
|
|
|
mov->src[0] = udiv->src[1];
|
|
src1_id = program->ssa_count++;
|
|
- dst_param_init_ssa(&mov->dst[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
|
|
+ vsir_dst_operand_init_ssa(&mov->dst[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
|
|
|
|
mov = vsir_program_iterator_next(it);
|
|
if (!(vsir_instruction_init_with_params(program, mov, &udiv->location, VSIR_OP_MOVC, 1, 3)))
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
- src_param_init_ssa(&mov->src[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
|
|
- src_param_init_ssa(&mov->src[1], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
|
|
+ vsir_src_operand_init_ssa(&mov->src[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
|
|
+ vsir_src_operand_init_ssa(&mov->src[1], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
|
|
vsir_register_init(&mov->src[2].reg, VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
mov->src[2].reg.dimension = udiv->src[1].reg.dimension;
mov->src[2].reg.u.immconst_u32[0] = 1;
@@ -1504,7 +1624,7 @@ static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,
if (mov->src[2].reg.dimension == VSIR_DIMENSION_VEC4)
mov->src[2].swizzle = VKD3D_SHADER_NO_SWIZZLE;
divisor_id = program->ssa_count++;
- dst_param_init_ssa(&mov->dst[0], divisor_id, mov->src[1].reg.data_type, mov->src[1].reg.dimension);
+ vsir_dst_operand_init_ssa(&mov->dst[0], divisor_id, mov->src[1].reg.data_type, mov->src[1].reg.dimension);

if (udiv->dst[0].reg.type != VKD3DSPR_NULL)
{
@@ -1515,9 +1635,10 @@ static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,

ins->flags = udiv->flags;

- src_param_init_ssa(&ins->src[0], src0_id, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
- src_param_init_ssa(&ins->src[1], divisor_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
- dst_param_init_ssa(&ins->dst[0], program->ssa_count, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[0], src0_id, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[1], divisor_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_dst_operand_init_ssa(&ins->dst[0], program->ssa_count,
+ udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);

/* Like its TPF equivalent, division by zero is well-defined for
* VSIR_OP_UDIV, and returns UINT_MAX. Division by zero is undefined
@@ -1526,8 +1647,9 @@ static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,
if (!(vsir_instruction_init_with_params(program, ins, &udiv->location, VSIR_OP_MOVC, 1, 3)))
return VKD3D_ERROR_OUT_OF_MEMORY;

- src_param_init_ssa(&ins->src[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
- src_param_init_ssa(&ins->src[1], program->ssa_count, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[1], program->ssa_count,
+ udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
vsir_register_init(&ins->src[2].reg, VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
ins->src[2].reg.dimension = udiv->src[1].reg.dimension;
ins->src[2].reg.u.immconst_u32[0] = UINT_MAX;
@@ -1550,16 +1672,18 @@ static enum vkd3d_result vsir_program_lower_udiv(struct vsir_program *program,

ins->flags = udiv->flags;

- src_param_init_ssa(&ins->src[0], src0_id, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
- src_param_init_ssa(&ins->src[1], divisor_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
- dst_param_init_ssa(&ins->dst[0], program->ssa_count, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[0], src0_id, udiv->src[0].reg.data_type, udiv->src[0].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[1], divisor_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_dst_operand_init_ssa(&ins->dst[0], program->ssa_count,
+ udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);

ins = vsir_program_iterator_next(it);
if (!(vsir_instruction_init_with_params(program, ins, &udiv->location, VSIR_OP_MOVC, 1, 3)))
return VKD3D_ERROR_OUT_OF_MEMORY;

- src_param_init_ssa(&ins->src[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
- src_param_init_ssa(&ins->src[1], program->ssa_count, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[0], src1_id, udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
+ vsir_src_operand_init_ssa(&ins->src[1], program->ssa_count,
+ udiv->src[1].reg.data_type, udiv->src[1].reg.dimension);
vsir_register_init(&ins->src[2].reg, VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
ins->src[2].reg.dimension = udiv->src[1].reg.dimension;
ins->src[2].reg.u.immconst_u32[0] = UINT_MAX;
@@ -1602,7 +1726,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog
s = vsir_swizzle_get_component(sincos->src->swizzle, 0);
mov->src[0].swizzle = vkd3d_shader_create_swizzle(s, s, s, s);

- dst_param_init_ssa_scalar(&mov->dst[0], program->ssa_count, sincos->src[0].reg.data_type);
+ vsir_dst_operand_init_ssa_scalar(&mov->dst[0], program->ssa_count, sincos->src[0].reg.data_type);

if (sincos->dst->write_mask & VKD3DSP_WRITEMASK_1)
{
@@ -1613,7 +1737,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog

ins->flags = sincos->flags;

- src_param_init_ssa_scalar(&ins->src[0], program->ssa_count, sincos->src[0].reg.data_type);
+ vsir_src_operand_init_ssa_scalar(&ins->src[0], program->ssa_count, sincos->src[0].reg.data_type);

ins->dst[0] = *sincos->dst;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_1;
@@ -1628,7 +1752,7 @@ static enum vkd3d_result vsir_program_lower_sm1_sincos(struct vsir_program *prog

ins->flags = sincos->flags;

- src_param_init_ssa_scalar(&ins->src[0], program->ssa_count, sincos->src[0].reg.data_type);
+ vsir_src_operand_init_ssa_scalar(&ins->src[0], program->ssa_count, sincos->src[0].reg.data_type);

ins->dst[0] = *sincos->dst;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0;
@@ -1672,7 +1796,8 @@ static enum vkd3d_result vsir_program_lower_sm4_sincos(struct vsir_program *prog
return VKD3D_ERROR_OUT_OF_MEMORY;

mov->src[0] = sincos->src[0];
- dst_param_init_ssa(&mov->dst[0], program->ssa_count, sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);
+ vsir_dst_operand_init_ssa(&mov->dst[0], program->ssa_count,
+ sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);

if (sincos->dst[0].reg.type != VKD3DSPR_NULL)
{
@@ -1683,7 +1808,7 @@ static enum vkd3d_result vsir_program_lower_sm4_sincos(struct vsir_program *prog

ins->flags = sincos->flags;

- src_param_init_ssa(&ins->src[0], program->ssa_count,
+ vsir_src_operand_init_ssa(&ins->src[0], program->ssa_count,
sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);
ins->dst[0] = sincos->dst[0];
}
@@ -1697,7 +1822,7 @@ static enum vkd3d_result vsir_program_lower_sm4_sincos(struct vsir_program *prog

ins->flags = sincos->flags;

- src_param_init_ssa(&ins->src[0], program->ssa_count,
+ vsir_src_operand_init_ssa(&ins->src[0], program->ssa_count,
sincos->src[0].reg.data_type, sincos->src[0].reg.dimension);
ins->dst[0] = sincos->dst[1];
}
@@ -1730,7 +1855,7 @@ static enum vkd3d_result vsir_program_lower_texld_sm1(struct vsir_program *progr
{
const struct vkd3d_shader_descriptor_info1 *sampler;
unsigned int idx = ins->dst[0].reg.idx[0].offset;
- struct vkd3d_shader_src_param *srcs;
+ struct vsir_src_operand *srcs;

/* texld DST, t# -> sample DST, t#, resource#, sampler# */

@@ -1741,13 +1866,13 @@ static enum vkd3d_result vsir_program_lower_texld_sm1(struct vsir_program *progr
return VKD3D_ERROR_NOT_IMPLEMENTED;
}

- if (!(srcs = vsir_program_get_src_params(program, 4)))
+ if (!(srcs = vsir_program_get_src_operands(program, 4)))
return VKD3D_ERROR_OUT_OF_MEMORY;

/* Note we run before I/O normalization. */
srcs[0] = ins->src[0];
- vsir_src_param_init_resource(&srcs[1], idx, idx);
- vsir_src_param_init_sampler(&srcs[2], idx, idx);
+ vsir_src_operand_init_resource(&srcs[1], idx, idx);
+ vsir_src_operand_init_sampler(&srcs[2], idx, idx);

sampler = vkd3d_shader_find_descriptor(&program->descriptors, VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER, idx);
if (sampler->flags & VKD3D_SHADER_DESCRIPTOR_INFO_FLAG_SAMPLER_COMPARISON_MODE)
@@ -1800,7 +1925,7 @@ static enum vkd3d_result vsir_program_lower_texldp(struct vsir_program *program,
if (!vsir_instruction_init_with_params(program, div_ins, &tex->location, VSIR_OP_DIV, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;

- vsir_dst_param_init(&div_ins->dst[0], VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&div_ins->dst[0], VKD3DSPR_TEMP, VSIR_DATA_F32, 1);
div_ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
div_ins->dst[0].reg.idx[0].offset = *tmp_idx;
div_ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL;
@@ -1830,17 +1955,17 @@ static enum vkd3d_result vsir_program_lower_texld(struct vsir_program *program,
{
const struct vkd3d_shader_descriptor_info1 *sampler;
unsigned int idx = tex->src[1].reg.idx[0].offset;
- struct vkd3d_shader_src_param *srcs;
+ struct vsir_src_operand *srcs;

VKD3D_ASSERT(tex->src[1].reg.idx_count == 1);
VKD3D_ASSERT(!tex->src[1].reg.idx[0].rel_addr);

- if (!(srcs = vsir_program_get_src_params(program, 4)))
+ if (!(srcs = vsir_program_get_src_operands(program, 4)))
return VKD3D_ERROR_OUT_OF_MEMORY;

srcs[0] = tex->src[0];
- vsir_src_param_init_resource(&srcs[1], idx, idx);
- vsir_src_param_init_sampler(&srcs[2], idx, idx);
+ vsir_src_operand_init_resource(&srcs[1], idx, idx);
+ vsir_src_operand_init_sampler(&srcs[2], idx, idx);

sampler = vkd3d_shader_find_descriptor(&program->descriptors, VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER, idx);
if (sampler->flags & VKD3D_SHADER_DESCRIPTOR_INFO_FLAG_SAMPLER_COMPARISON_MODE)
@@ -1887,17 +2012,17 @@ static enum vkd3d_result vsir_program_lower_texldd(struct vsir_program *program,
struct vkd3d_shader_instruction *texldd)
{
unsigned int idx = texldd->src[1].reg.idx[0].offset;
- struct vkd3d_shader_src_param *srcs;
+ struct vsir_src_operand *srcs;

VKD3D_ASSERT(texldd->src[1].reg.idx_count == 1);
VKD3D_ASSERT(!texldd->src[1].reg.idx[0].rel_addr);

- if (!(srcs = vsir_program_get_src_params(program, 5)))
+ if (!(srcs = vsir_program_get_src_operands(program, 5)))
return VKD3D_ERROR_OUT_OF_MEMORY;

srcs[0] = texldd->src[0];
- vsir_src_param_init_resource(&srcs[1], idx, idx);
- vsir_src_param_init_sampler(&srcs[2], idx, idx);
+ vsir_src_operand_init_resource(&srcs[1], idx, idx);
+ vsir_src_operand_init_sampler(&srcs[2], idx, idx);
srcs[3] = texldd->src[2];
srcs[4] = texldd->src[3];

@@ -1913,17 +2038,17 @@ static enum vkd3d_result vsir_program_lower_texldl(struct vsir_program *program,
{
unsigned int idx = texldl->src[1].reg.idx[0].offset;
enum vkd3d_shader_swizzle_component w;
- struct vkd3d_shader_src_param *srcs;
+ struct vsir_src_operand *srcs;

VKD3D_ASSERT(texldl->src[1].reg.idx_count == 1);
VKD3D_ASSERT(!texldl->src[1].reg.idx[0].rel_addr);

- if (!(srcs = vsir_program_get_src_params(program, 4)))
+ if (!(srcs = vsir_program_get_src_operands(program, 4)))
return VKD3D_ERROR_OUT_OF_MEMORY;

srcs[0] = texldl->src[0];
- vsir_src_param_init_resource(&srcs[1], idx, idx);
- vsir_src_param_init_sampler(&srcs[2], idx, idx);
+ vsir_src_operand_init_resource(&srcs[1], idx, idx);
+ vsir_src_operand_init_sampler(&srcs[2], idx, idx);

texldl->opcode = VSIR_OP_SAMPLE_LOD;
texldl->src = srcs;
@@ -1936,11 +2061,39 @@ static enum vkd3d_result vsir_program_lower_texldl(struct vsir_program *program,
return VKD3D_OK;
}

-static enum vkd3d_result vsir_program_lower_tex(struct vsir_program *program, struct vkd3d_shader_instruction *ins)
+static bool is_texture_projected(const struct vsir_program *program,
+ struct vkd3d_shader_message_context *message_context, unsigned int index)
{
+ const struct vkd3d_shader_parameter1 *parameter;
+
+ if (!(parameter = vsir_program_get_parameter(program, VKD3D_SHADER_PARAMETER_NAME_PROJECTED_TEXTURE_MASK)))
+ return false;
+
+ if (parameter->type != VKD3D_SHADER_PARAMETER_TYPE_IMMEDIATE_CONSTANT)
+ {
+ vkd3d_shader_error(message_context, NULL, VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED,
+ "Unsupported projected texture mask parameter type %#x.", parameter->type);
+ return false;
+ }
+
+ if (parameter->data_type != VKD3D_SHADER_PARAMETER_DATA_TYPE_UINT32)
+ {
+ vkd3d_shader_error(message_context, NULL, VKD3D_SHADER_ERROR_VSIR_INVALID_DATA_TYPE,
+ "Invalid projected texture mask parameter data type %#x.", parameter->data_type);
+ return false;
+ }
+
+ return parameter->u.immediate_constant.u.u32 & (1u << index);
+}
+
+static enum vkd3d_result vsir_program_lower_tex(struct vsir_program *program,
+ struct vsir_program_iterator *it, struct vkd3d_shader_message_context *message_context)
+{
+ struct vkd3d_shader_instruction *ins = vsir_program_iterator_current(it);
+ const struct vkd3d_shader_location location = ins->location;
const struct vkd3d_shader_descriptor_info1 *sampler;
unsigned int idx = ins->dst[0].reg.idx[0].offset;
- struct vkd3d_shader_src_param *srcs;
+ struct vsir_src_operand *srcs;

/* tex t# -> sample t#, t#, resource#, sampler#
* Note that the t# destination will subsequently be turned into a temp. */
@@ -1948,16 +2101,46 @@ static enum vkd3d_result vsir_program_lower_tex(struct vsir_program *program, st
/* We run before I/O normalization. */
VKD3D_ASSERT(program->normalisation_level < VSIR_NORMALISED_SM6);

- if (!(srcs = vsir_program_get_src_params(program, 4)))
+ if (!(srcs = vsir_program_get_src_operands(program, 4)))
return VKD3D_ERROR_OUT_OF_MEMORY;

- vsir_src_param_init(&srcs[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
- srcs[0].reg.idx[0].offset = idx;
- srcs[0].reg.dimension = VSIR_DIMENSION_VEC4;
- srcs[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
+ if (is_texture_projected(program, message_context, idx))
+ {
+ struct vsir_dst_operand *dst = ins->dst;
+ uint32_t coords = program->ssa_count++;

- vsir_src_param_init_resource(&srcs[1], idx, idx);
- vsir_src_param_init_sampler(&srcs[2], idx, idx);
+ /* div sr0, t#, t#.w */
+
+ if (!vsir_program_iterator_insert_after(it, 1))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+
+ ins = vsir_program_iterator_current(it);
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_DIV, 1, 2))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], coords);
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
+ ins->src[0].reg.idx[0].offset = idx;
+ ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
+ ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
+ ins->src[1] = ins->src[0];
+ ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(W, W, W, W);
+
+ ins = vsir_program_iterator_next(it);
+ vsir_instruction_init(ins, &location, VSIR_OP_SAMPLE);
+ ins->dst_count = 1;
+ ins->dst = dst;
+ vsir_src_operand_init_ssa_f32v4(&srcs[0], coords);
+ }
+ else
+ {
+ vsir_src_operand_init(&srcs[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
+ srcs[0].reg.idx[0].offset = idx;
+ srcs[0].reg.dimension = VSIR_DIMENSION_VEC4;
+ srcs[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
+ }
+
+ vsir_src_operand_init_resource(&srcs[1], idx, idx);
+ vsir_src_operand_init_sampler(&srcs[2], idx, idx);

sampler = vkd3d_shader_find_descriptor(&program->descriptors, VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER, idx);
if (sampler->flags & VKD3D_SHADER_DESCRIPTOR_INFO_FLAG_SAMPLER_COMPARISON_MODE)
@@ -1985,7 +2168,7 @@ static enum vkd3d_result vsir_program_lower_texcoord(struct vsir_program *progra
struct vkd3d_shader_instruction *ins)
{
unsigned int idx = ins->dst[0].reg.idx[0].offset;
- struct vkd3d_shader_src_param *srcs;
+ struct vsir_src_operand *srcs;

/* texcoord t# -> saturate t#, t#
* Note that the t# destination will subsequently be turned into a temp. */
@@ -1993,10 +2176,10 @@ static enum vkd3d_result vsir_program_lower_texcoord(struct vsir_program *progra
/* We run before I/O normalization. */
VKD3D_ASSERT(program->normalisation_level < VSIR_NORMALISED_SM6);

- if (!(srcs = vsir_program_get_src_params(program, 1)))
+ if (!(srcs = vsir_program_get_src_operands(program, 1)))
return VKD3D_ERROR_OUT_OF_MEMORY;

- vsir_src_param_init(&srcs[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
+ vsir_src_operand_init(&srcs[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
srcs[0].reg.idx[0].offset = idx;
srcs[0].reg.dimension = VSIR_DIMENSION_VEC4;
srcs[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
@@ -2009,8 +2192,8 @@ static enum vkd3d_result vsir_program_lower_texcoord(struct vsir_program *progra
}

static struct vkd3d_shader_instruction *generate_bump_coords(struct vsir_program *program,
- struct vsir_program_iterator *it, uint32_t idx, const struct vkd3d_shader_src_param *coords,
- const struct vkd3d_shader_src_param *perturbation, const struct vkd3d_shader_location *loc)
+ struct vsir_program_iterator *it, uint32_t idx, const struct vsir_src_operand *coords,
+ const struct vsir_src_operand *perturbation, const struct vkd3d_shader_location *loc)
{
struct vkd3d_shader_instruction *ins;
uint32_t ssa_temp, ssa_coords;
@@ -2027,23 +2210,23 @@ static struct vkd3d_shader_instruction *generate_bump_coords(struct vsir_program
ins = vsir_program_iterator_current(it);
if (!vsir_instruction_init_with_params(program, ins, loc, VSIR_OP_MAD, 1, 3))
return false;
- dst_param_init_ssa_float4(&ins->dst[0], ssa_temp);
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_temp);
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0 | VKD3DSP_WRITEMASK_1;
ins->src[0] = *perturbation;
ins->src[0].swizzle = vsir_combine_swizzles(perturbation->swizzle, VKD3D_SHADER_SWIZZLE(X, X, X, X));
- src_param_init_parameter_vec4(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_BUMP_MATRIX_0 + idx, VSIR_DATA_F32);
+ vsir_src_operand_init_parameter_vec4(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_BUMP_MATRIX_0 + idx, VSIR_DATA_F32);
ins->src[2] = *coords;

ins = vsir_program_iterator_next(it);
if (!vsir_instruction_init_with_params(program, ins, loc, VSIR_OP_MAD, 1, 3))
return false;
- dst_param_init_ssa_float4(&ins->dst[0], ssa_coords);
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_coords);
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0 | VKD3DSP_WRITEMASK_1;
ins->src[0] = *perturbation;
ins->src[0].swizzle = vsir_combine_swizzles(perturbation->swizzle, VKD3D_SHADER_SWIZZLE(Y, Y, Y, Y));
- src_param_init_parameter_vec4(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_BUMP_MATRIX_0 + idx, VSIR_DATA_F32);
+ vsir_src_operand_init_parameter_vec4(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_BUMP_MATRIX_0 + idx, VSIR_DATA_F32);
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(Z, W, W, W);
- src_param_init_ssa_float4(&ins->src[2], ssa_temp);
+ vsir_src_operand_init_ssa_f32v4(&ins->src[2], ssa_temp);
ins->src[2].swizzle = VKD3D_SHADER_SWIZZLE(X, Y, Y, Y);

return ins;
@@ -2053,8 +2236,8 @@ static enum vkd3d_result vsir_program_lower_bem(struct vsir_program *program, st
{
struct vkd3d_shader_instruction *ins = vsir_program_iterator_current(it);
const struct vkd3d_shader_location location = ins->location;
- const struct vkd3d_shader_src_param *src = ins->src;
- const struct vkd3d_shader_dst_param *dst = ins->dst;
+ const struct vsir_src_operand *src = ins->src;
+ const struct vsir_dst_operand *dst = ins->dst;

/* bem DST.xy, SRC0, SRC1
* ->
@@ -2077,11 +2260,12 @@ static enum vkd3d_result vsir_program_lower_texbem(struct vsir_program *program,
struct vkd3d_shader_instruction *ins = vsir_program_iterator_current(it);
const struct vkd3d_shader_location location = ins->location;
const struct vkd3d_shader_descriptor_info1 *descriptor;
- const struct vkd3d_shader_src_param *src = ins->src;
bool is_texbeml = (ins->opcode == VSIR_OP_TEXBEML);
unsigned int idx = ins->dst[0].reg.idx[0].offset;
uint32_t ssa_coords, ssa_luminance, ssa_sample;
- struct vkd3d_shader_src_param orig_coords;
+ const struct vsir_src_operand *src = ins->src;
+ struct vsir_src_operand orig_coords;
+ bool projected;

/* texbem t#, SRC
* ->
@@ -2097,6 +2281,11 @@ static enum vkd3d_result vsir_program_lower_texbem(struct vsir_program *program,
* mad srLUM.x, SRC.z, BUMP_LUMINANCE_SCALE#, BUMP_LUMINANCE_OFFSET#
* mul t#, t#, srLUM.xxxx
*
+ * If projecting, we replace srCOORDS calculation with
+ *
+ * div srPROJ, t#, t#.w
+ * bem srCOORDS.xy, srPROJ.xy, SRC
+ *
* Note that the t# destination will subsequently be turned into a temp. */

descriptor = vkd3d_shader_find_descriptor(&program->descriptors, VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER, idx);
@@ -2115,14 +2304,32 @@ static enum vkd3d_result vsir_program_lower_texbem(struct vsir_program *program,
return VKD3D_ERROR_NOT_IMPLEMENTED;
}

- if (!vsir_program_iterator_insert_after(it, is_texbeml ? 4 : 2))
+ projected = is_texture_projected(program, message_context, idx);
+ if (!vsir_program_iterator_insert_after(it, 2 + (is_texbeml ? 2 : 0) + (projected ? 1 : 0)))
return VKD3D_ERROR_OUT_OF_MEMORY;

- vsir_src_param_init(&orig_coords, VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
+ vsir_src_operand_init(&orig_coords, VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
orig_coords.reg.idx[0].offset = idx;
orig_coords.reg.dimension = VSIR_DIMENSION_VEC4;
orig_coords.swizzle = VKD3D_SHADER_NO_SWIZZLE;

+ if (projected)
+ {
+ uint32_t ssa_proj = program->ssa_count++;
+
+ ins = vsir_program_iterator_current(it);
+ if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_DIV, 1, 2))
+ return VKD3D_ERROR_OUT_OF_MEMORY;
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_proj);
+ ins->src[0] = orig_coords;
+ ins->src[1] = ins->src[0];
+ ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(W, W, W, W);
+
+ vsir_src_operand_init_ssa_f32v4(&orig_coords, ssa_proj);
+
+ vsir_program_iterator_next(it);
+ }
+
if (!(ins = generate_bump_coords(program, it, idx, &orig_coords, &src[0], &location)))
return VKD3D_ERROR_OUT_OF_MEMORY;
ssa_coords = ins->dst[0].reg.idx[0].offset;
@@ -2130,14 +2337,14 @@ static enum vkd3d_result vsir_program_lower_texbem(struct vsir_program *program,
ins = vsir_program_iterator_next(it);
if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_SAMPLE, 1, 3))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
ins->dst[0].reg.idx[0].offset = idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL;
- src_param_init_ssa_float4(&ins->src[0], ssa_coords);
+ vsir_src_operand_init_ssa_f32v4(&ins->src[0], ssa_coords);
ins->src[0].swizzle = VKD3D_SHADER_SWIZZLE(X, Y, Y, Y);
- vsir_src_param_init_resource(&ins->src[1], idx, idx);
- vsir_src_param_init_sampler(&ins->src[2], idx, idx);
+ vsir_src_operand_init_resource(&ins->src[1], idx, idx);
+ vsir_src_operand_init_sampler(&ins->src[2], idx, idx);

if (is_texbeml)
{
@@ -2147,29 +2354,29 @@ static enum vkd3d_result vsir_program_lower_texbem(struct vsir_program *program,
ssa_luminance = program->ssa_count++;

/* Replace t# destination of the SAMPLE instruction with an SSA value. */
- dst_param_init_ssa_float4(&ins->dst[0], ssa_sample);
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_sample);

ins = vsir_program_iterator_next(it);
if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_MAD, 1, 3))
return VKD3D_ERROR_OUT_OF_MEMORY;
- dst_param_init_ssa_float4(&ins->dst[0], ssa_luminance);
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_luminance);
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0;
ins->src[0] = src[0];
ins->src[0].swizzle = vkd3d_shader_create_swizzle(z, z, z, z);
- src_param_init_parameter(&ins->src[1],
+ vsir_src_operand_init_parameter(&ins->src[1],
VKD3D_SHADER_PARAMETER_NAME_BUMP_LUMINANCE_SCALE_0 + idx, VSIR_DATA_F32);
- src_param_init_parameter(&ins->src[2],
+ vsir_src_operand_init_parameter(&ins->src[2],
VKD3D_SHADER_PARAMETER_NAME_BUMP_LUMINANCE_OFFSET_0 + idx, VSIR_DATA_F32);

ins = vsir_program_iterator_next(it);
if (!vsir_instruction_init_with_params(program, ins, &location, VSIR_OP_MUL, 1, 2))
return VKD3D_ERROR_OUT_OF_MEMORY;
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_TEXTURE, VSIR_DATA_F32, 1);
ins->dst[0].reg.idx[0].offset = idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL;
- src_param_init_ssa_float4(&ins->src[0], ssa_sample);
- src_param_init_ssa_float4(&ins->src[1], ssa_luminance);
+ vsir_src_operand_init_ssa_f32v4(&ins->src[0], ssa_sample);
+ vsir_src_operand_init_ssa_f32v4(&ins->src[1], ssa_luminance);
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
}
return VKD3D_OK;
@@ -2267,6 +2474,14 @@ static enum vkd3d_result vsir_program_lower_d3dbc_instructions(struct vsir_progr
ret = vsir_program_lower_ifc(program, &it, &tmp_idx, message_context);
break;

+ case VSIR_OP_LRP:
+ ret = vsir_program_lower_lrp(program, &it);
+ break;
+
+ case VSIR_OP_NRM:
+ ret = vsir_program_lower_nrm(program, &it);
+ break;
+
case VSIR_OP_SINCOS:
ret = vsir_program_lower_sm1_sincos(program, &it);
break;
@@ -2290,7 +2505,7 @@ static enum vkd3d_result vsir_program_lower_d3dbc_instructions(struct vsir_progr
break;

case VSIR_OP_TEX:
- ret = vsir_program_lower_tex(program, ins);
+ ret = vsir_program_lower_tex(program, &it, message_context);
break;

case VSIR_OP_TEXLD:
@@ -2353,7 +2568,7 @@ static enum vkd3d_result vsir_program_lower_modifiers(struct vsir_program *progr
for (i = 0; i < ins->src_count; ++i)
{
enum vkd3d_shader_opcode new_opcodes[2] = {VSIR_OP_NOP, VSIR_OP_NOP};
- struct vkd3d_shader_src_param *src = &ins->src[i];
+ struct vsir_src_operand *src = &ins->src[i];

switch (src->modifiers)
{
@@ -2395,8 +2610,9 @@ static enum vkd3d_result vsir_program_lower_modifiers(struct vsir_program *progr
new_ins->src[0] = *src;
new_ins->src[0].modifiers = VKD3DSPSM_NONE;

- dst_param_init_ssa(&new_ins->dst[0], program->ssa_count, src->reg.data_type, src->reg.dimension);
- src_param_init_ssa(src, program->ssa_count, src->reg.data_type, src->reg.dimension);
+ vsir_dst_operand_init_ssa(&new_ins->dst[0], program->ssa_count,
+ src->reg.data_type, src->reg.dimension);
+ vsir_src_operand_init_ssa(src, program->ssa_count, src->reg.data_type, src->reg.dimension);

if (data_type_is_64_bit(src->reg.data_type))
{
@@ -2410,7 +2626,7 @@ static enum vkd3d_result vsir_program_lower_modifiers(struct vsir_program *progr

for (i = 0; i < ins->dst_count; ++i)
{
- struct vkd3d_shader_dst_param *dst = &ins->dst[i];
+ struct vsir_dst_operand *dst = &ins->dst[i];

/* It is always legitimate to ignore _pp. */
dst->modifiers &= ~VKD3DSPDM_PARTIALPRECISION;
@@ -2439,8 +2655,9 @@ static enum vkd3d_result vsir_program_lower_modifiers(struct vsir_program *progr
new_ins->dst[0] = *dst;
new_ins->dst[0].modifiers &= ~VKD3DSPDM_SATURATE;

- dst_param_init_ssa(dst, program->ssa_count, dst->reg.data_type, dst->reg.dimension);
- src_param_init_ssa(&new_ins->src[0], program->ssa_count, dst->reg.data_type, dst->reg.dimension);
+ vsir_dst_operand_init_ssa(dst, program->ssa_count, dst->reg.data_type, dst->reg.dimension);
+ vsir_src_operand_init_ssa(&new_ins->src[0], program->ssa_count,
+ dst->reg.data_type, dst->reg.dimension);

if (data_type_is_64_bit(dst->reg.data_type))
{
@@ -2453,7 +2670,7 @@ static enum vkd3d_result vsir_program_lower_modifiers(struct vsir_program *progr
}
}

- program->has_no_modifiers = true;
+ program->normalisation_flags.has_no_modifiers = true;

return ret;
}
@@ -2556,7 +2773,7 @@ static enum vkd3d_result vsir_program_lower_texture_writes(struct vsir_program *
{
for (unsigned int i = 0; i < ins->src_count; ++i)
{
- struct vkd3d_shader_src_param *src = &ins->src[i];
+ struct vsir_src_operand *src = &ins->src[i];

if (src->reg.type == VKD3DSPR_TEXTURE && bitmap_is_set(&texture_written_mask, src->reg.idx[0].offset))
{
@@ -2567,7 +2784,7 @@ static enum vkd3d_result vsir_program_lower_texture_writes(struct vsir_program *

for (unsigned int i = 0; i < ins->dst_count; ++i)
{
- struct vkd3d_shader_dst_param *dst = &ins->dst[i];
+ struct vsir_dst_operand *dst = &ins->dst[i];

if (dst->reg.type == VKD3DSPR_TEXTURE)
{
@@ -2635,9 +2852,9 @@ static enum vkd3d_result vsir_program_normalise_ps1_output(struct vsir_program *
return VKD3D_ERROR_OUT_OF_MEMORY;
}

- src_param_init_temp_float4(&ins->src[0], 0);
+ vsir_src_operand_init_temp_f32v4(&ins->src[0], 0);
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_COLOROUT, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_COLOROUT, VSIR_DATA_F32, 1);
ins->dst[0].reg.idx[0].offset = 0;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL;
@@ -2722,11 +2939,11 @@ static enum vkd3d_result vsir_program_ensure_diffuse(struct vsir_program *progra
return VKD3D_ERROR_OUT_OF_MEMORY;

vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_ATTROUT, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_ATTROUT, VSIR_DATA_F32, 1);
ins->dst[0].reg.idx[0].offset = 0;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_ALL & ~program->diffuse_written_mask;
- vsir_src_param_init(&ins->src[0], VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
for (i = 0; i < 4; ++i)
@@ -2780,7 +2997,7 @@ static bool target_allows_subset_masks(const struct vkd3d_shader_compile_info *i
}

static void remove_unread_output_components(const struct shader_signature *signature,
- struct vkd3d_shader_instruction *ins, struct vkd3d_shader_dst_param *dst)
+ struct vkd3d_shader_instruction *ins, struct vsir_dst_operand *dst)
{
const struct signature_element *e;

@@ -2813,7 +3030,7 @@ static void remove_unread_output_components(const struct shader_signature *signa
if (ins->dst_count == 1)
vkd3d_shader_instruction_make_nop(ins);
else
- vsir_dst_param_init_null(dst);
+ vsir_dst_operand_init_null(dst);
}
}

@@ -2947,8 +3164,8 @@ static enum vkd3d_result vsir_program_remap_output_signature(struct vsir_program
e = &signature->elements[j];

vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
- dst_param_init_output(&ins->dst[0], VSIR_DATA_F32, e->register_index, e->mask);
- vsir_src_param_init(&ins->src[0], VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
+ vsir_dst_operand_init_output(&ins->dst[0], VSIR_DATA_F32, e->register_index, e->mask);
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
ins = vsir_program_iterator_next(&it);
@@ -2982,11 +3199,6 @@ struct hull_flattener
unsigned int orig_ssa_count;
};

-static bool flattener_is_in_fork_or_join_phase(const struct hull_flattener *flattener)
-{
- return flattener->phase == VSIR_OP_HS_FORK_PHASE || flattener->phase == VSIR_OP_HS_JOIN_PHASE;
-}
-
static void flattener_fixup_ssa_register(struct hull_flattener *normaliser,
struct vkd3d_shader_register *reg, unsigned int instance_id)
{
@@ -3108,9 +3320,9 @@ static enum vkd3d_result flattener_flatten_phases(struct hull_flattener *normali
normaliser->phase = VSIR_OP_INVALID;
for (ins = vsir_program_iterator_head(&it); ins; ins = vsir_program_iterator_next(&it))
{
- if (ins->opcode == VSIR_OP_HS_FORK_PHASE || ins->opcode == VSIR_OP_HS_JOIN_PHASE)
+ if (vsir_opcode_is_fork_or_join_phase(ins->opcode))
{
- b = flattener_is_in_fork_or_join_phase(normaliser);
+ b = vsir_opcode_is_fork_or_join_phase(normaliser->phase);
/* Reset the phase info. */
phase_body_it_valid = false;
normaliser->phase = ins->opcode;
@@ -3194,19 +3406,14 @@ struct control_point_normaliser
{
struct vsir_program *program;
enum vkd3d_shader_opcode phase;
- struct vkd3d_shader_src_param *outpointid_param;
+ struct vsir_src_operand *outpointid_param;
};

-static bool control_point_normaliser_is_in_control_point_phase(const struct control_point_normaliser *normaliser)
+struct vsir_src_operand *vsir_program_create_outpointid_param(struct vsir_program *program)
{
- return normaliser->phase == VSIR_OP_HS_CONTROL_POINT_PHASE;
-}
-
-struct vkd3d_shader_src_param *vsir_program_create_outpointid_param(struct vsir_program *program)
-{
- struct vkd3d_shader_src_param *rel_addr;
+ struct vsir_src_operand *rel_addr;

- if (!(rel_addr = vsir_program_get_src_params(program, 1)))
+ if (!(rel_addr = vsir_program_get_src_operands(program, 1)))
return NULL;

vsir_register_init(&rel_addr->reg, VKD3DSPR_OUTPOINTID, VSIR_DATA_U32, 0);
@@ -3216,12 +3423,12 @@ struct vkd3d_shader_src_param *vsir_program_create_outpointid_param(struct vsir_
return rel_addr;
}

-static void shader_dst_param_normalise_outpointid(struct vkd3d_shader_dst_param *dst_param,
+static void vsir_dst_operand_normalise_outpointid(struct vsir_dst_operand *dst,
struct control_point_normaliser *normaliser)
{
- struct vkd3d_shader_register *reg = &dst_param->reg;
+ struct vkd3d_shader_register *reg = &dst->reg;

- if (control_point_normaliser_is_in_control_point_phase(normaliser) && reg->type == VKD3DSPR_OUTPUT)
+ if (vsir_opcode_is_control_point_phase(normaliser->phase) && reg->type == VKD3DSPR_OUTPUT)
{
/* The TPF reader validates idx_count. */
VKD3D_ASSERT(reg->idx_count == 1);
@@ -3257,9 +3464,9 @@ static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_p
continue;

vsir_instruction_init(ins, location, VSIR_OP_MOV);
- ins->dst = vsir_program_get_dst_params(normaliser->program, 1);
+ ins->dst = vsir_program_get_dst_operands(normaliser->program, 1);
ins->dst_count = 1;
- ins->src = vsir_program_get_src_params(normaliser->program, 1);
+ ins->src = vsir_program_get_src_operands(normaliser->program, 1);
ins->src_count = 1;

if (!ins->dst || ! ins->src)
@@ -3270,13 +3477,13 @@ static enum vkd3d_result control_point_normaliser_emit_hs_input(struct control_p

VKD3D_ASSERT(normaliser->outpointid_param);

- vsir_dst_param_init_io(&ins->dst[0], VKD3DSPR_OUTPUT, e, 2);
+ vsir_dst_operand_init_io(&ins->dst[0], VKD3DSPR_OUTPUT, e, 2);
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].reg.idx[0].offset = 0;
ins->dst[0].reg.idx[0].rel_addr = normaliser->outpointid_param;
ins->dst[0].reg.idx[1].offset = e->register_index;

- vsir_src_param_init_io(&ins->src[0], VKD3DSPR_INPUT, e, 2);
+ vsir_src_operand_init_io(&ins->src[0], VKD3DSPR_INPUT, e, 2);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].reg.idx[0].offset = 0;
ins->src[0].reg.idx[0].rel_addr = normaliser->outpointid_param;
@@ -3331,7 +3538,9 @@ static enum vkd3d_result instruction_array_normalise_hull_shader_control_point_i
if (vsir_instruction_is_dcl(ins))
break;
for (j = 0; j < ins->dst_count; ++j)
- shader_dst_param_normalise_outpointid(&ins->dst[j], &normaliser);
+ {
+ vsir_dst_operand_normalise_outpointid(&ins->dst[j], &normaliser);
+ }
break;
}
}
@@ -3386,12 +3595,13 @@ struct io_normaliser
struct shader_signature *input_signature;
struct shader_signature *output_signature;
struct shader_signature *patch_constant_signature;
+ struct vsir_normalisation_flags *normalisation_flags;

enum vkd3d_shader_opcode phase;

- struct vkd3d_shader_dst_param *input_dcl_params[MAX_REG_OUTPUT];
- struct vkd3d_shader_dst_param *output_dcl_params[MAX_REG_OUTPUT];
- struct vkd3d_shader_dst_param *pc_dcl_params[MAX_REG_OUTPUT];
+ struct vsir_dst_operand *input_dcl_params[MAX_REG_OUTPUT];
+ struct vsir_dst_operand *output_dcl_params[MAX_REG_OUTPUT];
+ struct vsir_dst_operand *pc_dcl_params[MAX_REG_OUTPUT];
struct io_normaliser_register_data input_range_map[MAX_REG_OUTPUT];
struct io_normaliser_register_data output_range_map[MAX_REG_OUTPUT];
struct io_normaliser_register_data pc_range_map[MAX_REG_OUTPUT];
@@ -3399,11 +3609,6 @@ struct io_normaliser
bool use_vocp;
};

-static bool io_normaliser_is_in_fork_or_join_phase(const struct io_normaliser *normaliser)
-{
- return normaliser->phase == VSIR_OP_HS_FORK_PHASE || normaliser->phase == VSIR_OP_HS_JOIN_PHASE;
-}
-
static bool shader_signature_find_element_for_reg(const struct shader_signature *signature,
unsigned int reg_idx, unsigned int write_mask, unsigned int *element_idx)
{
@@ -3521,7 +3726,7 @@ static enum vkd3d_result io_normaliser_add_index_range(struct io_normaliser *nor
signature = normaliser->output_signature;
break;
case VKD3DSPR_OUTPUT:
- if (!io_normaliser_is_in_fork_or_join_phase(normaliser))
+ if (!vsir_opcode_is_fork_or_join_phase(normaliser->phase))
{
range_map = normaliser->output_range_map;
signature = normaliser->output_signature;
@@ -3793,21 +3998,20 @@ static unsigned int shader_register_normalise_arrayed_addressing(struct vkd3d_sh
return id_idx;
}

-static bool shader_dst_param_io_normalise(struct vkd3d_shader_dst_param *dst_param,
- struct io_normaliser *normaliser)
- {
+static bool vsir_dst_operand_io_normalise(struct vsir_dst_operand *dst, struct io_normaliser *normaliser)
+{
unsigned int id_idx, reg_idx, write_mask, element_idx;
- struct vkd3d_shader_register *reg = &dst_param->reg;
+ struct vkd3d_shader_register *reg = &dst->reg;
const struct shader_signature *signature;
const struct signature_element *e;

- write_mask = dst_param->write_mask;
+ write_mask = dst->write_mask;

switch (reg->type)
{
case VKD3DSPR_OUTPUT:
reg_idx = reg->idx[reg->idx_count - 1].offset;
- if (io_normaliser_is_in_fork_or_join_phase(normaliser))
+ if (vsir_opcode_is_fork_or_join_phase(normaliser->phase))
{
signature = normaliser->patch_constant_signature;
/* Convert patch constant outputs to the patch constant register type to avoid the need
@@ -3851,7 +4055,7 @@ static bool shader_dst_param_io_normalise(struct vkd3d_shader_dst_par
if (reg->idx[0].offset > 0)
{
write_mask = VKD3DSP_WRITEMASK_0;
- dst_param->write_mask = write_mask;
+ dst->write_mask = write_mask;
}
/* Leave point size as a system value for the backends to consume. */
if (reg->idx[0].offset == VSIR_RASTOUT_POINT_SIZE)
@@ -3870,7 +4074,7 @@ static bool shader_dst_param_io_normalise(struct vkd3d_shader_dst_par
vkd3d_unreachable();
e = &signature->elements[element_idx];

- if ((e->register_count > 1 || vsir_sysval_semantic_is_tess_factor(e->sysval_semantic)))
+ if (vsir_signature_element_is_array(e, normaliser->normalisation_flags))
id_idx = shader_register_normalise_arrayed_addressing(reg, id_idx, e->register_index);

/* Replace the register index with the signature element index */
@@ -3880,11 +4084,11 @@ static bool shader_dst_param_io_normalise(struct vkd3d_shader_dst_par
return true;
}

-static void shader_src_param_io_normalise(struct vkd3d_shader_src_param *src_param,
+static void vsir_src_operand_io_normalise(struct vsir_src_operand *src,
struct io_normaliser *normaliser, struct vkd3d_shader_instruction *ins)
{
unsigned int i, id_idx, reg_idx, write_mask, element_idx, component_idx;
- struct vkd3d_shader_register *reg = &src_param->reg;
+ struct vkd3d_shader_register *reg = &src->reg;
const struct shader_signature *signature;
const struct signature_element *e;

@@ -3923,7 +4127,7 @@ static void shader_src_param_io_normalise(struct vkd3d_shader_src_par

case VKD3DSPR_OUTCONTROLPOINT:
reg->type = VKD3DSPR_OUTPUT;
- if (io_normaliser_is_in_fork_or_join_phase(normaliser))
+ if (vsir_opcode_is_fork_or_join_phase(normaliser->phase))
normaliser->use_vocp = true;
/* fall through */
case VKD3DSPR_OUTPUT:
@@ -3942,7 +4146,7 @@ static void shader_src_param_io_normalise(struct vkd3d_shader_src_par
}

id_idx = reg->idx_count - 1;
- write_mask = VKD3DSP_WRITEMASK_0 << vsir_swizzle_get_component(src_param->swizzle, 0);
+ write_mask = VKD3DSP_WRITEMASK_0 << vsir_swizzle_get_component(src->swizzle, 0);
if (!shader_signature_find_element_for_reg(signature, reg_idx, write_mask, &element_idx))
{
vkd3d_shader_error(normaliser->message_context, &ins->location, VKD3D_SHADER_ERROR_VSIR_INVALID_SIGNATURE,
@@ -3952,7 +4156,7 @@ static void shader_src_param_io_normalise(struct vkd3d_shader_src_par
}

e = &signature->elements[element_idx];
- if ((e->register_count > 1 || vsir_sysval_semantic_is_tess_factor(e->sysval_semantic)))
+ if (vsir_signature_element_is_array(e, normaliser->normalisation_flags))
id_idx = shader_register_normalise_arrayed_addressing(reg, id_idx, e->register_index);
reg->idx[id_idx].offset = element_idx;
reg->idx_count = id_idx + 1;
@@ -3960,8 +4164,10 @@ static void shader_src_param_io_normalise(struct vkd3d_shader_src_par
if ((component_idx = vsir_write_mask_get_component_idx(e->mask)))
{
for (i = 0; i < VKD3D_VEC4_SIZE; ++i)
- if (vsir_swizzle_get_component(src_param->swizzle, i))
- src_param->swizzle -= component_idx << VKD3D_SHADER_SWIZZLE_SHIFT(i);
+ {
+ if (vsir_swizzle_get_component(src->swizzle, i))
+ src->swizzle -= component_idx << VKD3D_SHADER_SWIZZLE_SHIFT(i);
+ }
}
}

@@ -3984,9 +4190,13 @@ static void shader_instruction_normalise_io_params(struct vkd3d_shader_instructi
if (vsir_instruction_is_dcl(ins))
break;
for (i = 0; i < ins->dst_count; ++i)
- shader_dst_param_io_normalise(&ins->dst[i], normaliser);
+ {
+ vsir_dst_operand_io_normalise(&ins->dst[i], normaliser);
+ }
for (i = 0; i < ins->src_count; ++i)
- shader_src_param_io_normalise(&ins->src[i], normaliser, ins);
+ {
+ vsir_src_operand_io_normalise(&ins->src[i], normaliser, ins);
+ }
break;
}
}
@@ -4007,6 +4217,7 @@ static enum vkd3d_result vsir_program_normalise_io_registers(struct vsir_program
normaliser.input_signature = &program->input_signature;
normaliser.output_signature = &program->output_signature;
normaliser.patch_constant_signature = &program->patch_constant_signature;
+ normaliser.normalisation_flags = &program->normalisation_flags;

for (ins = vsir_program_iterator_head(&it); ins; ins = vsir_program_iterator_next(&it))
{
@@ -4059,8 +4270,7 @@ struct flat_constants_normaliser
};

static bool get_flat_constant_register_type(const struct vkd3d_shader_register *reg,
- enum vkd3d_shader_d3dbc_constant_register *set, uint32_t *index,
- struct vkd3d_shader_src_param **rel_addr)
+ enum vkd3d_shader_d3dbc_constant_register *set, uint32_t *index, struct vsir_src_operand **rel_addr)
{
static const struct
{
@@ -4091,43 +4301,43 @@ static bool get_flat_constant_register_type(const struct vkd3d_shader_register *
return false;
}

-static void shader_register_normalise_flat_constants(struct vkd3d_shader_src_param *param,
+static void shader_register_normalise_flat_constants(struct vsir_src_operand *src,
const struct flat_constants_normaliser *normaliser)
{
enum vkd3d_shader_d3dbc_constant_register set;
- struct vkd3d_shader_src_param *rel_addr;
+ struct vsir_src_operand *rel_addr;
unsigned int c;
uint32_t index;
size_t i, j;

- if (!get_flat_constant_register_type(&param->reg, &set, &index, &rel_addr))
+ if (!get_flat_constant_register_type(&src->reg, &set, &index, &rel_addr))
return;

for (i = 0; i < normaliser->def_count; ++i)
{
if (normaliser->defs[i].set == set && normaliser->defs[i].index == index)
{
- param->reg.type = VKD3DSPR_IMMCONST;
- param->reg.idx_count = 0;
- param->reg.dimension = VSIR_DIMENSION_VEC4;
+ src->reg.type = VKD3DSPR_IMMCONST;
+ src->reg.idx_count = 0;
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
for (j = 0; j < 4; ++j)
{
- c = vsir_swizzle_get_component(param->swizzle, j);
- param->reg.u.immconst_u32[j] = normaliser->defs[i].value[c];
+ c = vsir_swizzle_get_component(src->swizzle, j);
+ src->reg.u.immconst_u32[j] = normaliser->defs[i].value[c];
}
- param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
return;
}
}

- param->reg.type = VKD3DSPR_CONSTBUFFER;
- param->reg.idx[0].offset = set; /* register ID */
- param->reg.idx[0].rel_addr = NULL;
- param->reg.idx[1].offset = set; /* register index */
- param->reg.idx[1].rel_addr = NULL;
- param->reg.idx[2].offset = index; /* buffer index */
- param->reg.idx[2].rel_addr = rel_addr;
- param->reg.idx_count = 3;
+ src->reg.type = VKD3DSPR_CONSTBUFFER;
+ src->reg.idx[0].offset = set; /* register ID */
+ src->reg.idx[0].rel_addr = NULL;
+ src->reg.idx[1].offset = set; /* register index */
+ src->reg.idx[1].rel_addr = NULL;
+ src->reg.idx[2].offset = index; /* buffer index */
+ src->reg.idx[2].rel_addr = rel_addr;
+ src->reg.idx_count = 3;
}

static enum vkd3d_result vsir_program_normalise_flat_constants(struct vsir_program *program,
@@ -4280,7 +4490,7 @@ static void vsir_program_replace_instructions(struct vsir_program *program,

struct cf_flattener_if_info
{
- struct vkd3d_shader_src_param *false_param;
+ struct vsir_src_operand *false_param;
unsigned int id;
uint32_t merge_block_id;
unsigned int else_block_id;
@@ -4302,7 +4512,7 @@ struct cf_flattener_switch_case
struct cf_flattener_switch_info
{
struct vsir_program_iterator ins_it;
- const struct vkd3d_shader_src_param *condition;
+ const struct vsir_src_operand *condition;
unsigned int id;
unsigned int merge_block_id;
unsigned int default_block_id;
@@ -4392,19 +4602,20 @@ static unsigned int cf_flattener_alloc_block_id(struct cf_flattener *flattener)
return ++flattener->block_id;
}

-static struct vkd3d_shader_src_param *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins,
+static struct vsir_src_operand *instruction_src_params_alloc(struct vkd3d_shader_instruction *ins,
unsigned int count, struct cf_flattener *flattener)
{
- struct vkd3d_shader_src_param *params;
+ struct vsir_src_operand *src;

- if (!(params = vsir_program_get_src_params(flattener->program, count)))
+ if (!(src = vsir_program_get_src_operands(flattener->program, count)))
{
cf_flattener_set_error(flattener, VKD3D_ERROR_OUT_OF_MEMORY);
return NULL;
}
- ins->src = params;
+ ins->src = src;
ins->src_count = count;
- return params;
+
+ return src;
}

static void cf_flattener_emit_label(struct cf_flattener *flattener, unsigned int label_id)
@@ -4421,12 +4632,11 @@ static void cf_flattener_emit_label(struct cf_flattener *flattener, unsigned int
}

/* For conditional branches, this returns the false target branch parameter. */
-static struct vkd3d_shader_src_param *cf_flattener_emit_branch(struct cf_flattener *flattener,
- unsigned int merge_block_id, unsigned int continue_block_id,
- const struct vkd3d_shader_src_param *condition, unsigned int true_id, unsigned int false_id,
- unsigned int flags)
+static struct vsir_src_operand *cf_flattener_emit_branch(struct cf_flattener *flattener,
+ unsigned int merge_block_id, unsigned int continue_block_id, const struct vsir_src_operand *condition,
+ unsigned int true_id, unsigned int false_id, unsigned int flags)
{
- struct vkd3d_shader_src_param *src_params, *false_branch_param;
+ struct vsir_src_operand *src, *false_branch;
struct vkd3d_shader_instruction *ins;

if (!(ins = cf_flattener_instruction_append(flattener)))
@@ -4435,51 +4645,51 @@ static struct vkd3d_shader_src_param *cf_flattener_emit_branch(struct cf_flatten

if (condition)
{
- if (!(src_params = instruction_src_params_alloc(ins, 4 + !!continue_block_id, flattener)))
+ if (!(src = instruction_src_params_alloc(ins, 4 + !!continue_block_id, flattener)))
{
vkd3d_shader_instruction_make_nop(ins);
return NULL;
}
- src_params[0] = *condition;
+ src[0] = *condition;
if (flags == VKD3D_SHADER_CONDITIONAL_OP_Z)
{
- vsir_src_param_init_label(&src_params[1], false_id);
- vsir_src_param_init_label(&src_params[2], true_id);
- false_branch_param = &src_params[1];
+ vsir_src_operand_init_label(&src[1], false_id);
+ vsir_src_operand_init_label(&src[2], true_id);
+ false_branch = &src[1];
}
else
{
- vsir_src_param_init_label(&src_params[1], true_id);
- vsir_src_param_init_label(&src_params[2], false_id);
- false_branch_param = &src_params[2];
+ vsir_src_operand_init_label(&src[1], true_id);
+ vsir_src_operand_init_label(&src[2], false_id);
+ false_branch = &src[2];
}
- vsir_src_param_init_label(&src_params[3], merge_block_id);
+ vsir_src_operand_init_label(&src[3], merge_block_id);
if (continue_block_id)
- vsir_src_param_init_label(&src_params[4], continue_block_id);
+ vsir_src_operand_init_label(&src[4], continue_block_id);
}
else
{
- if (!(src_params = instruction_src_params_alloc(ins, merge_block_id ? 3 : 1, flattener)))
+ if (!(src = instruction_src_params_alloc(ins, merge_block_id ? 3 : 1, flattener)))
{
vkd3d_shader_instruction_make_nop(ins);
return NULL;
}
- vsir_src_param_init_label(&src_params[0], true_id);
+ vsir_src_operand_init_label(&src[0], true_id);
if (merge_block_id)
{
/* An unconditional branch may only have merge information for a loop, which
* must have both a merge block and continue block. */
- vsir_src_param_init_label(&src_params[1], merge_block_id);
- vsir_src_param_init_label(&src_params[2], continue_block_id);
+ vsir_src_operand_init_label(&src[1], merge_block_id);
+ vsir_src_operand_init_label(&src[2], continue_block_id);
}
- false_branch_param = NULL;
+ false_branch = NULL;
}

- return false_branch_param;
+ return false_branch;
}

static void cf_flattener_emit_conditional_branch_and_merge(struct cf_flattener *flattener,
- const struct vkd3d_shader_src_param *condition, unsigned int true_id, unsigned int flags)
+ const struct vsir_src_operand *condition, unsigned int true_id, unsigned int flags)
{
unsigned int merge_block_id;

@@ -4584,7 +4794,7 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
for (instruction = vsir_program_iterator_head(&it); instruction; instruction = vsir_program_iterator_next(&it))
{
unsigned int loop_header_block_id, loop_body_block_id, continue_block_id, merge_block_id, true_block_id;
- const struct vkd3d_shader_src_param *src = instruction->src;
+ struct vsir_src_operand *src = instruction->src;
struct cf_flattener_info *cf_info;

flattener->location = instruction->location;
@@ -4747,7 +4957,6 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte

case VSIR_OP_ENDSWITCH:
{
- struct vkd3d_shader_src_param *src_params;
unsigned int j;

if (!cf_info->u.switch_.default_block_id)
@@ -4762,21 +4971,21 @@ static enum vkd3d_result cf_flattener_iterate_instruction_array(struct cf_flatte
* when new instructions are appended to the
* vkd3d_shader_instruction_array. */
dst_ins = vsir_program_iterator_current(&cf_info->u.switch_.ins_it);
- if (!(src_params = instruction_src_params_alloc(dst_ins,
+ if (!(src = instruction_src_params_alloc(dst_ins,
cf_info->u.switch_.cases_count * 2 + 3, flattener)))
{
vkd3d_free(cf_info->u.switch_.cases);
return VKD3D_ERROR_OUT_OF_MEMORY;
}
- src_params[0] = *cf_info->u.switch_.condition;
- vsir_src_param_init_label(&src_params[1], cf_info->u.switch_.default_block_id);
- vsir_src_param_init_label(&src_params[2], cf_info->u.switch_.merge_block_id);
+ src[0] = *cf_info->u.switch_.condition;
+ vsir_src_operand_init_label(&src[1], cf_info->u.switch_.default_block_id);
+ vsir_src_operand_init_label(&src[2], cf_info->u.switch_.merge_block_id);
for (j = 0; j < cf_info->u.switch_.cases_count; ++j)
{
unsigned int index = j * 2 + 3;

- vsir_src_param_init(&src_params[index], VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
- src_params[index].reg.u.immconst_u32[0] = cf_info->u.switch_.cases[j].value;
- vsir_src_param_init_label(&src_params[index + 1], cf_info->u.switch_.cases[j].block_id);
+ vsir_src_operand_init(&src[index], VKD3DSPR_IMMCONST, VSIR_DATA_U32, 0);
+ src[index].reg.u.immconst_u32[0] = cf_info->u.switch_.cases[j].value;
+ vsir_src_operand_init_label(&src[index + 1], cf_info->u.switch_.cases[j].block_id);
}
vkd3d_free(cf_info->u.switch_.cases);

@@ -4945,10 +5154,10 @@ static enum vkd3d_result vsir_program_flatten_control_flow_constructs(struct vsi
return result;
}

-static unsigned int label_from_src_param(const struct vkd3d_shader_src_param *param)
+static unsigned int label_from_src_operand(const struct vsir_src_operand *src)
{
- VKD3D_ASSERT(param->reg.type == VKD3DSPR_LABEL);
- return param->reg.idx[0].offset;
+ VKD3D_ASSERT(src->reg.type == VKD3DSPR_LABEL);
+ return src->reg.idx[0].offset;
}

/* A record represents replacing a jump from block `switch_label' to
@@ -5004,7 +5213,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
switch (ins->opcode)
{
case VSIR_OP_LABEL:
- current_label = label_from_src_param(&ins->src[0]);
+ current_label = label_from_src_operand(&ins->src[0]);
if (!(dst_ins = shader_instruction_array_append(&instructions)))
goto fail;
*dst_ins = *ins;
@@ -5021,7 +5230,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
}

case_count = (ins->src_count - 3) / 2;
- default_label = label_from_src_param(&ins->src[1]);
+ default_label = label_from_src_operand(&ins->src[1]);

/* In principle we can have a switch with no cases, and we
* just have to jump to the default label. */
@@ -5035,14 +5244,14 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
vkd3d_shader_instruction_make_nop(dst_ins);
goto fail;
}
- vsir_src_param_init_label(&dst_ins->src[0], default_label);
+ vsir_src_operand_init_label(&dst_ins->src[0], default_label);
}

if_label = current_label;

for (j = 0; j < case_count; ++j)
{
- unsigned int fallthrough_label, case_label = label_from_src_param(&ins->src[3 + 2 * j + 1]);
+ unsigned int fallthrough_label, case_label = label_from_src_operand(&ins->src[3 + 2 * j + 1]);

if (!(dst_ins = shader_instruction_array_append(&instructions)))
goto fail;
@@ -5051,7 +5260,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
vkd3d_shader_instruction_make_nop(dst_ins);
goto fail;
}
- dst_param_init_ssa_bool(&dst_ins->dst[0], ssa_count);
+ vsir_dst_operand_init_ssa_bool(&dst_ins->dst[0], ssa_count);
dst_ins->src[0] = ins->src[0];
dst_ins->src[1] = ins->src[3 + 2 * j];

@@ -5070,9 +5279,9 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
vkd3d_shader_instruction_make_nop(dst_ins);
goto fail;
}
- src_param_init_ssa_bool(&dst_ins->src[0], ssa_count);
- vsir_src_param_init_label(&dst_ins->src[1], case_label);
- vsir_src_param_init_label(&dst_ins->src[2], fallthrough_label);
+ vsir_src_operand_init_ssa_bool(&dst_ins->src[0], ssa_count);
+ vsir_src_operand_init_label(&dst_ins->src[1], case_label);
+ vsir_src_operand_init_label(&dst_ins->src[2], fallthrough_label);

++ssa_count;

@@ -5095,7 +5304,7 @@ static enum vkd3d_result vsir_program_lower_switch_to_selection_ladder(struct vs
vkd3d_shader_instruction_make_nop(dst_ins);
goto fail;
}
- vsir_src_param_init_label(&dst_ins->src[0], ++block_count);
+ vsir_src_operand_init_label(&dst_ins->src[0], ++block_count);

if_label = block_count;
}
@@ -5159,8 +5368,8 @@ struct ssas_to_temps_block_info
|
|
{
|
|
struct phi_incoming_to_temp
|
|
{
|
|
- struct vkd3d_shader_src_param *src;
|
|
- struct vkd3d_shader_dst_param *dst;
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
} *incomings;
|
|
size_t incoming_capacity;
|
|
size_t incoming_count;
|
|
@@ -5233,7 +5442,7 @@ static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps_in_function(
|
|
struct phi_incoming_to_temp *incoming;
|
|
unsigned int label;
|
|
|
|
- label = label_from_src_param(&ins->src[j + 1]);
|
|
+ label = label_from_src_operand(&ins->src[j + 1]);
|
|
VKD3D_ASSERT(label);
|
|
|
|
info = &block_info[label - 1];
|
|
@@ -5273,7 +5482,7 @@ static enum vkd3d_result vsir_program_materialise_phi_ssas_to_temps_in_function(
|
|
switch (ins->opcode)
|
|
{
|
|
case VSIR_OP_LABEL:
|
|
- current_label = label_from_src_param(&ins->src[0]);
|
|
+ current_label = label_from_src_operand(&ins->src[0]);
|
|
break;
|
|
|
|
case VSIR_OP_BRANCH:
|
|
@@ -5517,7 +5726,7 @@ struct vsir_cfg_structure
|
|
} loop;
|
|
struct vsir_cfg_structure_selection
|
|
{
|
|
- struct vkd3d_shader_src_param *condition;
|
|
+ struct vsir_src_operand *condition;
|
|
struct vsir_cfg_structure_list if_body;
|
|
struct vsir_cfg_structure_list else_body;
|
|
bool invert_condition;
|
|
@@ -5534,7 +5743,7 @@ struct vsir_cfg_structure
|
|
JUMP_RET,
|
|
} type;
|
|
unsigned int target;
|
|
- struct vkd3d_shader_src_param *condition;
|
|
+ struct vsir_src_operand *condition;
|
|
bool invert_condition;
|
|
bool needs_launcher;
|
|
} jump;
|
|
@@ -5719,10 +5928,10 @@ static bool vsir_block_dominates(struct vsir_block *b1, struct vsir_block *b2)
|
|
return bitmap_is_set(b1->dominates, b2->label - 1);
|
|
}
|
|
|
|
-static enum vkd3d_result vsir_cfg_add_edge(struct vsir_cfg *cfg, struct vsir_block *block,
|
|
- struct vkd3d_shader_src_param *successor_param)
|
|
+static enum vkd3d_result vsir_cfg_add_edge(struct vsir_cfg *cfg,
|
|
+ struct vsir_block *block, struct vsir_src_operand *successor_operand)
{
- unsigned int target = label_from_src_param(successor_param);
+ unsigned int target = label_from_src_operand(successor_operand);
struct vsir_block *successor = &cfg->blocks[target - 1];
enum vkd3d_result ret;
 
@@ -5903,7 +6112,7 @@ static enum vkd3d_result vsir_cfg_init(struct vsir_cfg *cfg, struct vsir_program
 
case VSIR_OP_LABEL:
{
- unsigned int label = label_from_src_param(&ins->src[0]);
+ unsigned int label = label_from_src_operand(&ins->src[0]);
 
VKD3D_ASSERT(!current_block);
VKD3D_ASSERT(label > 0);
@@ -6656,7 +6865,7 @@ static enum vkd3d_result vsir_cfg_build_structured_program(struct vsir_cfg *cfg)
 
if (vsir_register_is_label(&end->src[0].reg))
{
- unsigned int target = label_from_src_param(&end->src[0]);
+ unsigned int target = label_from_src_operand(&end->src[0]);
struct vsir_block *successor = &cfg->blocks[target - 1];
 
vsir_cfg_compute_edge_action(cfg, block, successor, &action_true);
@@ -6664,12 +6873,12 @@ static enum vkd3d_result vsir_cfg_build_structured_program(struct vsir_cfg *cfg)
}
else
{
- unsigned int target = label_from_src_param(&end->src[1]);
+ unsigned int target = label_from_src_operand(&end->src[1]);
struct vsir_block *successor = &cfg->blocks[target - 1];
 
vsir_cfg_compute_edge_action(cfg, block, successor, &action_true);
 
- target = label_from_src_param(&end->src[2]);
+ target = label_from_src_operand(&end->src[2]);
successor = &cfg->blocks[target - 1];
 
vsir_cfg_compute_edge_action(cfg, block, successor, &action_false);
@@ -7265,9 +7474,9 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
 
++target->temp_count;
 
- dst_param_init_temp_bool(&ins->dst[0], target->temp_count - 1);
- src_param_init_temp_uint(&ins->src[0], target->jump_target_temp_idx);
- src_param_init_const_uint(&ins->src[1], outer_continue_target);
+ vsir_dst_operand_init_temp_bool(&ins->dst[0], target->temp_count - 1);
+ vsir_src_operand_init_temp_u32(&ins->src[0], target->jump_target_temp_idx);
+ vsir_src_operand_init_const_u32(&ins->src[1], outer_continue_target);
 
if (!(ins = shader_instruction_array_append(&target->instructions)))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -7277,7 +7486,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
return VKD3D_ERROR_OUT_OF_MEMORY;
}
 
- src_param_init_temp_bool(&ins->src[0], target->temp_count - 1);
+ vsir_src_operand_init_temp_bool(&ins->src[0], target->temp_count - 1);
 
ins = shader_instruction_array_append(&target->instructions);
if (!vsir_instruction_init_with_params(cfg->program, ins, &no_loc, VSIR_OP_IEQ, 1, 2))
@@ -7288,9 +7497,9 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
 
++target->temp_count;
 
- dst_param_init_temp_bool(&ins->dst[0], target->temp_count - 1);
- src_param_init_temp_uint(&ins->src[0], target->jump_target_temp_idx);
- src_param_init_const_uint(&ins->src[1], inner_break_target);
+ vsir_dst_operand_init_temp_bool(&ins->dst[0], target->temp_count - 1);
+ vsir_src_operand_init_temp_u32(&ins->src[0], target->jump_target_temp_idx);
+ vsir_src_operand_init_const_u32(&ins->src[1], inner_break_target);
 
if (!(ins = shader_instruction_array_append(&target->instructions)))
return VKD3D_ERROR_OUT_OF_MEMORY;
@@ -7301,7 +7510,7 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_loop(struct vsir_cfg *cfg,
}
ins->flags |= VKD3D_SHADER_CONDITIONAL_OP_Z;
 
- src_param_init_temp_bool(&ins->src[0], target->temp_count - 1);
+ vsir_src_operand_init_temp_bool(&ins->src[0], target->temp_count - 1);
}
 
return VKD3D_OK;
@@ -7399,8 +7608,8 @@ static enum vkd3d_result vsir_cfg_structure_list_emit_jump(struct vsir_cfg *cfg,
return VKD3D_ERROR_OUT_OF_MEMORY;
}
 
- dst_param_init_temp_uint(&ins->dst[0], target->jump_target_temp_idx);
- src_param_init_const_uint(&ins->src[0], jump_target);
+ vsir_dst_operand_init_temp_u32(&ins->dst[0], target->jump_target_temp_idx);
+ vsir_src_operand_init_const_u32(&ins->src[0], jump_target);
}
 
if (!(ins = shader_instruction_array_append(&target->instructions)))
@@ -7814,7 +8023,7 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
 
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_DISCARD, 0, 1);
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_Z;
- src_param_init_const_uint(&ins->src[0], 0);
+ vsir_src_operand_init_const_u32(&ins->src[0], 0);
vsir_program_iterator_next(it);
 
return VKD3D_OK;
@@ -7827,15 +8036,15 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
{
case VKD3D_SHADER_PARAMETER_DATA_TYPE_FLOAT32:
vsir_instruction_init_with_params(program, ins, &loc, opcodes[compare_func].float_opcode, 1, 2);
- src_param_init_temp_float(&ins->src[opcodes[compare_func].swap ? 1 : 0], colour_temp);
- src_param_init_parameter(&ins->src[opcodes[compare_func].swap ? 0 : 1],
+ vsir_src_operand_init_temp_f32(&ins->src[opcodes[compare_func].swap ? 1 : 0], colour_temp);
+ vsir_src_operand_init_parameter(&ins->src[opcodes[compare_func].swap ? 0 : 1],
VKD3D_SHADER_PARAMETER_NAME_ALPHA_TEST_REF, VSIR_DATA_F32);
break;
 
case VKD3D_SHADER_PARAMETER_DATA_TYPE_UINT32:
vsir_instruction_init_with_params(program, ins, &loc, opcodes[compare_func].uint_opcode, 1, 2);
- src_param_init_temp_uint(&ins->src[opcodes[compare_func].swap ? 1 : 0], colour_temp);
- src_param_init_parameter(&ins->src[opcodes[compare_func].swap ? 0 : 1],
+ vsir_src_operand_init_temp_u32(&ins->src[opcodes[compare_func].swap ? 1 : 0], colour_temp);
+ vsir_src_operand_init_parameter(&ins->src[opcodes[compare_func].swap ? 0 : 1],
VKD3D_SHADER_PARAMETER_NAME_ALPHA_TEST_REF, VSIR_DATA_U32);
break;
 
@@ -7849,24 +8058,24 @@ static enum vkd3d_result insert_alpha_test_before_ret(struct vsir_program *progr
return VKD3D_ERROR_NOT_IMPLEMENTED;
}
 
- dst_param_init_ssa_bool(&ins->dst[0], program->ssa_count);
+ vsir_dst_operand_init_ssa_bool(&ins->dst[0], program->ssa_count);
ins->src[opcodes[compare_func].swap ? 1 : 0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[opcodes[compare_func].swap ? 1 : 0].swizzle = VKD3D_SHADER_SWIZZLE(W, W, W, W);
 
ins = vsir_program_iterator_next(it);
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_DISCARD, 0, 1);
ins->flags = VKD3D_SHADER_CONDITIONAL_OP_Z;
- src_param_init_ssa_bool(&ins->src[0], program->ssa_count);
+ vsir_src_operand_init_ssa_bool(&ins->src[0], program->ssa_count);
 
++program->ssa_count;
 
ins = vsir_program_iterator_next(it);
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_OUTPUT, VSIR_DATA_F32, 1);
ins->dst[0].reg.idx[0].offset = colour_signature_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].write_mask = program->output_signature.elements[colour_signature_idx].mask;
- src_param_init_temp_float(&ins->src[0], colour_temp);
+ vsir_src_operand_init_temp_f32(&ins->src[0], colour_temp);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
 
@@ -7940,7 +8149,7 @@ static enum vkd3d_result vsir_program_insert_alpha_test(struct vsir_program *pro
 
for (size_t j = 0; j < ins->dst_count; ++j)
{
- struct vkd3d_shader_dst_param *dst = &ins->dst[j];
+ struct vsir_dst_operand *dst = &ins->dst[j];
 
/* Note we run after I/O normalization. */
if (dst->reg.type == VKD3DSPR_OUTPUT && dst->reg.idx[0].offset == colour_signature_idx)
@@ -7971,12 +8180,12 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
continue;
 
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_DP4, 1, 2);
- src_param_init_temp_float4(&ins->src[0], position_temp);
- src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_CLIP_PLANE_0 + i, VSIR_DATA_F32);
+ vsir_src_operand_init_temp_f32v4(&ins->src[0], position_temp);
+ vsir_src_operand_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_CLIP_PLANE_0 + i, VSIR_DATA_F32);
ins->src[1].swizzle = VKD3D_SHADER_NO_SWIZZLE;
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
 
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_OUTPUT, VSIR_DATA_F32, 1);
if (output_idx < 4)
ins->dst[0].reg.idx[0].offset = low_signature_idx;
else
@@ -7989,11 +8198,11 @@ static enum vkd3d_result insert_clip_planes_before_ret(struct vsir_program *prog
}
 
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_OUTPUT, VSIR_DATA_F32, 1);
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_OUTPUT, VSIR_DATA_F32, 1);
ins->dst[0].reg.idx[0].offset = position_signature_idx;
ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->dst[0].write_mask = program->output_signature.elements[position_signature_idx].mask;
- src_param_init_temp_float(&ins->src[0], position_temp);
+ vsir_src_operand_init_temp_f32(&ins->src[0], position_temp);
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
ins = vsir_program_iterator_next(it);
@@ -8009,9 +8218,9 @@ static enum vkd3d_result vsir_program_insert_clip_planes(struct vsir_program *pr
unsigned int low_signature_idx = ~0u, high_signature_idx = ~0u;
const struct vkd3d_shader_parameter1 *mask_parameter = NULL;
uint32_t position_signature_idx, position_temp, mask;
+ unsigned int plane_count, next_register_index;
struct signature_element *clip_element;
struct vkd3d_shader_instruction *ins;
- unsigned int plane_count;
int ret;
 
if (program->shader_version.type != VKD3D_SHADER_TYPE_VERTEX)
@@ -8067,16 +8276,18 @@ static enum vkd3d_result vsir_program_insert_clip_planes(struct vsir_program *pr
plane_count = vkd3d_popcount(mask);
 
/* Register mask is ignored since we operate after I/O normalisation. */
+ next_register_index = vsir_signature_next_location(signature);
if (!(clip_element = add_signature_element(signature, "SV_ClipDistance", 0,
- vkd3d_write_mask_from_component_count(min(plane_count, 4)), 0, VKD3DSIM_NONE)))
+ vkd3d_write_mask_from_component_count(min(plane_count, 4)), next_register_index, VKD3DSIM_NONE)))
return VKD3D_ERROR_OUT_OF_MEMORY;
low_signature_idx = clip_element - signature->elements;
clip_element->sysval_semantic = VKD3D_SHADER_SV_CLIP_DISTANCE;
 
if (plane_count > 4)
{
+ next_register_index = vsir_signature_next_location(signature);
if (!(clip_element = add_signature_element(signature, "SV_ClipDistance", 1,
- vkd3d_write_mask_from_component_count(plane_count - 4), 0, VKD3DSIM_NONE)))
+ vkd3d_write_mask_from_component_count(plane_count - 4), next_register_index, VKD3DSIM_NONE)))
return VKD3D_ERROR_OUT_OF_MEMORY;
high_signature_idx = clip_element - signature->elements;
clip_element->sysval_semantic = VKD3D_SHADER_SV_CLIP_DISTANCE;
@@ -8102,7 +8313,7 @@ static enum vkd3d_result vsir_program_insert_clip_planes(struct vsir_program *pr
 
for (size_t j = 0; j < ins->dst_count; ++j)
{
- struct vkd3d_shader_dst_param *dst = &ins->dst[j];
+ struct vsir_dst_operand *dst = &ins->dst[j];
 
/* Note we run after I/O normalization. */
if (dst->reg.type == VKD3DSPR_OUTPUT && dst->reg.idx[0].offset == position_signature_idx)
@@ -8116,6 +8327,743 @@ static enum vkd3d_result vsir_program_insert_clip_planes(struct vsir_program *pr
return VKD3D_OK;
}
 
+struct sysval_array_normaliser
|
|
+{
|
|
+ struct vsir_transformation_context *ctx;
|
|
+
|
|
+ /* sysval semantic currently being normalised. */
|
|
+ enum vkd3d_shader_sysval_semantic sysval_semantic;
|
|
+ bool output;
|
|
+
|
|
+ /* Registers used by the sysval elements of the original signature. */
|
|
+ struct
|
|
+ {
|
|
+ unsigned int index;
|
|
+ unsigned int mask;
|
|
+ } regs[2];
|
|
+ unsigned int reg_count;
|
|
+
|
|
+ /* Index of the signature element created for the new array. */
|
|
+ unsigned int element_idx;
|
|
+ /* Indexable temporary reserved to store a copy of the native sysval
|
|
+ * values for the current phase. If ~0u, the temporary has not been
|
|
+ * allocated for this phase yet. */
|
|
+ unsigned int idxtemp_idx;
|
|
+
|
|
+ enum vkd3d_shader_opcode phase;
|
|
+};
|
|
+
|
|
+static enum vkd3d_result sysval_array_normaliser_add_components(
|
|
+ struct sysval_array_normaliser *normaliser, unsigned int index, unsigned int mask)
|
|
+{
|
|
+ unsigned int q;
|
|
+
|
|
+ for (q = 0; q < normaliser->reg_count; ++q)
|
|
+ {
|
|
+ if (index == normaliser->regs[q].index)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (q == normaliser->reg_count)
|
|
+ {
|
|
+ if (normaliser->reg_count >= ARRAY_SIZE(normaliser->regs))
|
|
+ {
|
|
+ vkd3d_shader_error(normaliser->ctx->message_context,
|
|
+ &normaliser->ctx->null_location, VKD3D_SHADER_ERROR_VSIR_INVALID_SIGNATURE,
|
|
+ "Sysval semantic %#x elements require more than %zu registers.\n",
|
|
+ normaliser->sysval_semantic, ARRAY_SIZE(normaliser->regs));
|
|
+ return VKD3D_ERROR_INVALID_SHADER;
|
|
+ }
|
|
+ normaliser->reg_count += 1;
|
|
+ }
|
|
+ normaliser->regs[q].index = index;
|
|
+ normaliser->regs[q].mask |= mask;
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+static enum vkd3d_result sysval_array_normaliser_init(struct vsir_transformation_context *ctx,
|
|
+ const char *semantic_name, enum vkd3d_shader_sysval_semantic sysval_semantic,
|
|
+ bool output, struct sysval_array_normaliser *normaliser)
|
|
+{
|
|
+ unsigned int component_count = 0, next_register_index;
|
|
+ struct shader_signature *signature;
|
|
+ struct signature_element *element;
|
|
+ enum vkd3d_result res;
|
|
+
|
|
+ memset(normaliser, 0, sizeof(*normaliser));
|
|
+ normaliser->ctx = ctx;
|
|
+ normaliser->sysval_semantic = sysval_semantic;
|
|
+ normaliser->output = output;
|
|
+ normaliser->element_idx = ~0u;
|
|
+
|
|
+ normaliser->phase = VSIR_OP_INVALID;
|
|
+
|
|
+ signature = output ? &ctx->program->output_signature : &ctx->program->input_signature;
|
|
+
|
|
+ for (unsigned int i = 0; i < signature->element_count; ++i)
|
|
+ {
|
|
+ element = &signature->elements[i];
|
|
+ if (element->sysval_semantic != sysval_semantic)
|
|
+ continue;
|
|
+
|
|
+ for (unsigned int j = 0; j < element->register_count; ++j)
|
|
+ {
|
|
+ if ((res = sysval_array_normaliser_add_components(normaliser,
|
|
+ element->register_index + j, element->mask)) < 0)
|
|
+ return res;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (!normaliser->reg_count)
|
|
+ return VKD3D_OK;
|
|
+ next_register_index = vsir_signature_next_location(signature);
|
|
+ if (!(element = add_signature_element(signature, semantic_name, next_register_index,
|
|
+ VKD3DSP_WRITEMASK_0, signature->element_count, element->interpolation_mode)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ element->sysval_semantic = sysval_semantic;
|
|
+ for (unsigned int q = 0; q < normaliser->reg_count; ++q)
|
|
+ {
|
|
+ component_count += vkd3d_popcount(normaliser->regs[q].mask);
|
|
+ }
|
|
+ element->register_count = component_count;
|
|
+ normaliser->element_idx = signature->element_count - 1;
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+/* For every component 'k' that belongs to an output signature element that
|
|
+ * has the sysval currently being handled by the sysval_array_normaliser, add
|
|
+ * the following instruction before the return points of the program:
|
|
+ *
|
|
+ * mov o[k][e].x, x[idxtmp_idx][q].kkkk
|
|
+ *
|
|
+ * or in case this is the control point phase of a hull shader:
|
|
+ *
|
|
+ * mov o[k][P][e].x, x[idxtmp_idx][q].kkkk
|
|
+ *
|
|
+ * where:
|
|
+ * 'q' is the index of the register containing 'k' in the normaliser's
|
|
+ * internal list.
|
|
+ * '.kkkk' is the replicated swizzle that corresponds to component 'k'.
|
|
+ * 'e' is the new array's signature element index.
|
|
+ * 'idxtmp_idx' is the index of the indexable temp reserved by the
|
|
+ * normaliser.
|
|
+ * 'P' is the output control point ID.
|
|
+ */
|
|
+static enum vkd3d_result sysval_array_normaliser_add_output_copy(
|
|
+ struct sysval_array_normaliser *normaliser, struct vsir_program_iterator *it)
|
|
+{
|
|
+ struct vsir_program *program = normaliser->ctx->program;
|
|
+ struct vsir_src_operand *outpointid_param = NULL;
|
|
+ unsigned int output_component_count = 0;
|
|
+ struct vkd3d_shader_instruction *mov;
|
|
+ struct signature_element *element;
|
|
+ struct vkd3d_shader_location loc;
|
|
+
|
|
+ if (!normaliser->output)
|
|
+ return VKD3D_OK;
|
|
+ if (vsir_opcode_is_fork_or_join_phase(normaliser->phase))
|
|
+ return VKD3D_OK;
|
|
+ if (normaliser->idxtemp_idx == ~0u)
|
|
+ return VKD3D_OK;
|
|
+
|
|
+ element = &program->output_signature.elements[normaliser->element_idx];
|
|
+ loc = vsir_program_iterator_current(it)->location;
|
|
+
|
|
+ if (program->shader_version.type == VKD3D_SHADER_TYPE_HULL
|
|
+ && !(outpointid_param = vsir_program_create_outpointid_param(program)))
|
|
+ {
|
|
+ ERR("Failed to allocate outpointid param.\n");
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ }
|
|
+
|
|
+ for (unsigned int q = 0; q < normaliser->reg_count; ++q)
|
|
+ {
|
|
+ for (unsigned int k = 0; k < VKD3D_VEC4_SIZE; ++k)
|
|
+ {
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
+
|
|
+ if (!(normaliser->regs[q].mask & (1u << k)))
|
|
+ continue;
|
|
+
|
|
+ if (!(mov = vsir_program_iterator_insert_before_and_move(it, 1)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ if (!vsir_instruction_init_with_params(program, mov, &loc, VSIR_OP_MOV, 1, 1))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ dst = &mov->dst[0];
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_OUTPUT, VSIR_DATA_F32, 2);
|
|
+ dst->reg.idx[0].offset = output_component_count++;
|
|
+ dst->reg.idx[1].offset = normaliser->element_idx;
|
|
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ if (outpointid_param)
|
|
+ {
|
|
+ dst->reg.idx_count = 3;
|
|
+ dst->reg.idx[2] = dst->reg.idx[1];
|
|
+ dst->reg.idx[1].rel_addr = outpointid_param;
|
|
+ dst->reg.idx[1].offset = 0;
|
|
+ }
|
|
+
|
|
+ src = &mov->src[0];
|
|
+ vsir_src_operand_init(src, VKD3DSPR_IDXTEMP, VSIR_DATA_F32, 2);
|
|
+ src->reg.idx[0].offset = normaliser->idxtemp_idx;
|
|
+ src->reg.idx[1].offset = q;
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->swizzle = vsir_swizzle_from_writemask(1u << k);
|
|
+
|
|
+ vsir_program_iterator_next(it);
|
|
+ }
|
|
+ }
|
|
+ VKD3D_ASSERT(output_component_count == element->register_count);
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+/* For every component 'k' that belongs to an input signature element that has
|
|
+ * the sysval currently being handled by the sysval_array_normaliser, add the
|
|
+ * following single instruction at the beginning of the program:
|
|
+ *
|
|
+ * mov x[idxtmp_idx][q].k, v[k][e].x
|
|
+ *
|
|
+ * or in case there are multiple input control points, add multiple
|
|
+ * instructions, one for every one of them 'p':
|
|
+ *
|
|
+ * mov x[idxtmp_idx][p * reg_count + q].k, v[k][p][e].x
|
|
+ *
|
|
+ * where:
|
|
+ * 'q' is the index of the register containing 'k' in the normaliser's
|
|
+ * internal list.
|
|
+ * '.k' is the write mask that corresponds to component 'k'
|
|
+ * 'e' is the new array's signature element index.
|
|
+ * 'idxtmp_idx' is the index of the indexable temp reserved by the
|
|
+ * normaliser.
|
|
+ * 'reg_count' is the number of registers in the normaliser's internal
|
|
+ * list.
|
|
+ *
|
|
+ * NOTE: This function also does this for components 'k' that belong to an
|
|
+ * output signature in case the normaliser is handling an output semantic and
|
|
+ * this is the fork or join phase of a hull shader, where they can be used as
|
|
+ * source operands. Naturally, 'o' registers are used as source operands on
|
|
+ * such 'mov' instructions instead of 'v'.
|
|
+ */
|
|
+static enum vkd3d_result sysval_array_normaliser_add_input_copy(
|
|
+ struct sysval_array_normaliser *normaliser, struct vsir_program_iterator *it)
|
|
+{
|
|
+ struct vsir_program *program = normaliser->ctx->program;
|
|
+ struct vkd3d_shader_instruction *mov;
|
|
+ struct signature_element *element;
|
|
+ unsigned int control_point_count;
|
|
+ struct vkd3d_shader_location loc;
|
|
+
|
|
+ loc = vsir_program_iterator_current(it)->location;
|
|
+ if (normaliser->output)
|
|
+ {
|
|
+ control_point_count = program->output_control_point_count;
|
|
+ element = &program->output_signature.elements[normaliser->element_idx];
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ control_point_count = program->input_control_point_count;
|
|
+ element = &program->input_signature.elements[normaliser->element_idx];
|
|
+ }
|
|
+
|
|
+ if (!vsir_program_iterator_insert_before_and_move(it, max(1, control_point_count) * element->register_count))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ for (unsigned int p = 0; p < max(1, control_point_count); ++p)
|
|
+ {
|
|
+ unsigned int input_component_count = 0;
|
|
+
|
|
+ for (unsigned int q = 0; q < normaliser->reg_count; ++q)
|
|
+ {
|
|
+ for (unsigned int k = 0; k < VKD3D_VEC4_SIZE; ++k)
|
|
+ {
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
+
|
|
+ if (!(normaliser->regs[q].mask & (1u << k)))
|
|
+ continue;
|
|
+
|
|
+ mov = vsir_program_iterator_current(it);
|
|
+ vsir_instruction_init_with_params(program, mov, &loc, VSIR_OP_MOV, 1, 1);
|
|
+
|
|
+ dst = &mov->dst[0];
|
|
+ vsir_dst_operand_init(dst, VKD3DSPR_IDXTEMP, VSIR_DATA_F32, 2);
|
|
+ dst->reg.idx[0].offset = normaliser->idxtemp_idx;
|
|
+ dst->reg.idx[1].offset = p * normaliser->reg_count + q;
|
|
+ dst->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ dst->write_mask = 1u << k;
|
|
+
|
|
+ src = &mov->src[0];
|
|
+ if (control_point_count)
|
|
+ {
|
|
+ vsir_src_operand_init(src, normaliser->output ? VKD3DSPR_OUTPUT : VKD3DSPR_INPUT,
|
|
+ VSIR_DATA_F32, 3);
|
|
+ src->reg.idx[0].offset = input_component_count++;
|
|
+ src->reg.idx[1].offset = p;
|
|
+ src->reg.idx[2].offset = normaliser->element_idx;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ vsir_src_operand_init(src, VKD3DSPR_INPUT, VSIR_DATA_F32, 2);
|
|
+ src->reg.idx[0].offset = input_component_count++;
|
|
+ src->reg.idx[1].offset = normaliser->element_idx;
|
|
+ }
|
|
+ src->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ src->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
+
|
|
+ vsir_program_iterator_next(it);
|
|
+ }
|
|
+ }
|
|
+ VKD3D_ASSERT(input_component_count == element->register_count);
|
|
+ }
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+/* NOTE: This might be replaced by a single field in vsir_program at some point. */
|
|
+static unsigned int vsir_program_get_idxtemp_count(struct vsir_program *program)
|
|
+{
|
|
+ struct vsir_program_iterator it = vsir_program_iterator(&program->instructions);
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ size_t count = 0;
|
|
+
|
|
+ for (ins = vsir_program_iterator_head(&it); ins; ins = vsir_program_iterator_next(&it))
|
|
+ {
|
|
+ if (ins->opcode != VSIR_OP_DCL_INDEXABLE_TEMP)
|
|
+ continue;
|
|
+ if (count < ins->declaration.indexable_temp.register_idx)
|
|
+ count = ins->declaration.indexable_temp.register_idx;
|
|
+ }
|
|
+
|
|
+ return count;
|
|
+}
|
|
+
|
|
+static enum vkd3d_result sysval_array_normaliser_dcl_indexable_temp(
|
|
+ struct sysval_array_normaliser *normaliser, struct vsir_program_iterator *it, size_t idx)
|
|
+{
|
|
+ struct vsir_program *program = normaliser->ctx->program;
|
|
+ unsigned int register_size = normaliser->reg_count;
|
|
+ struct vkd3d_shader_indexable_temp *t;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ unsigned int control_point_count;
|
|
+
|
|
+ normaliser->idxtemp_idx = idx;
|
|
+ control_point_count = normaliser->output
|
|
+ ? program->output_control_point_count : program->input_control_point_count;
|
|
+
|
|
+ if (control_point_count && (!normaliser->output || vsir_opcode_is_fork_or_join_phase(normaliser->phase)))
|
|
+ register_size *= program->input_control_point_count;
|
|
+
|
|
+ if (!(ins = vsir_program_iterator_insert_before_and_move(it, 1)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ vsir_instruction_init_with_params(program, ins, &normaliser->ctx->null_location, VSIR_OP_DCL_INDEXABLE_TEMP, 0, 0);
|
|
+ t = &ins->declaration.indexable_temp;
|
|
+ t->register_idx = normaliser->idxtemp_idx;
|
|
+ t->register_size = register_size;
|
|
+ t->alignment = 0;
|
|
+ t->data_type = VSIR_DATA_F32;
|
|
+ t->component_count = 4;
|
|
+ t->has_function_scope = false;
|
|
+
|
|
+ vsir_program_iterator_next(it);
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+static bool vsir_program_validate_outpointid_control_point_index(const struct vkd3d_shader_register *reg)
|
|
+{
|
|
+ const struct vkd3d_shader_register_index *index;
|
|
+
|
|
+ if (reg->idx_count < 2)
|
|
+ return false;
|
|
+
|
|
+ index = ®->idx[reg->idx_count - 2];
|
|
+ if (index->offset)
|
|
+ return false;
|
|
+ if (!index->rel_addr || index->rel_addr->reg.type != VKD3DSPR_OUTPOINTID)
|
|
+ return false;
|
|
+ if (index->rel_addr->reg.idx_count)
|
|
+ return false;
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/* If a register refers to a signature element of index 'e' that has the
|
|
+ * sysval being handled by the normaliser, this maps the register as follows:
|
|
+ *
|
|
+ * v[e] -> x[idxtmp_idx][q]
|
|
+ *
|
|
+ * v[i][e] -> x[idxtmp_idx][i + q]
|
|
+ * on shaders without control points.
|
|
+ *
|
|
+ * v[p][e] -> x[idxtmp_idx][p * reg_count + q],
|
|
+ * on shaders with control points.
|
|
+ *
|
|
+ * v[i][p][e] -> x[idxtmp_idx][p * reg_count + i + q]
|
|
+ * on shaders with control points.
|
|
+ *
|
|
+ * o[e] -> x[idxtmp_idx][q]
|
|
+ *
|
|
+ * o[i][e] -> x[idxtmp_idx][i + q]
|
|
+ * on shaders without control points.
|
|
+ *
|
|
+ * o[p][e] -> x[idxtmp_idx][p * reg_count + q]
|
|
+ * if on HS fork/join phase, where it is a src.
|
|
+ *
|
|
+ * o[P][e] -> x[idxtmp_idx][q]
|
|
+ * if on HS control point phase, where it is a dst.
|
|
+ * P is expected to always be the output control point ID.
|
|
+ *
|
|
+ * o[i][p][e] -> x[idxtmp_idx][p * reg_count + i + q]
|
|
+ * if on HS fork/join phase, where it is a src.
|
|
+ *
|
|
+ * o[i][P][e] -> x[idxtmp_idx][i + q]
|
|
+ * if on HS control point phase, where it is a dst.
|
|
+ * P is expected to always be the output control point ID.
|
|
+ *
|
|
+ * where:
|
|
+ * 'q' is the index of the register that matches signature element 'e' in
|
|
+ * the normaliser's internal list.
|
|
+ * 'idxtmp_idx' is the index of the indexable temp reserved by the
|
|
+ * normaliser.
|
|
+ * 'reg_count' is the number of registers in the normaliser's internal
|
|
+ * list.
|
|
+ *
|
|
+ * The swizzle (for source operands) is also combined with the mask of the
|
|
+ * relevant signature element 'e'.
|
|
+ */
|
|
+static enum vkd3d_result sysval_array_normaliser_map_register(struct sysval_array_normaliser *normaliser,
|
|
+ struct vsir_program_iterator *it, struct vkd3d_shader_register *reg, unsigned int *src_swizzle)
|
|
+{
|
|
+ struct vkd3d_shader_register_index i_idx = {0}, p_idx = {0};
|
|
+ struct vsir_program *program = normaliser->ctx->program;
|
|
+ unsigned int element_index, control_point_count;
|
|
+ struct vkd3d_shader_instruction *ssa_ins;
|
|
+ struct shader_signature *signature;
|
|
+ struct signature_element *element;
|
|
+ struct vkd3d_shader_location loc;
|
|
+ unsigned int q;
|
|
+
|
|
+ loc = vsir_program_iterator_current(it)->location;
|
|
+
|
|
+ signature = normaliser->output ? &program->output_signature : &program->input_signature;
|
|
+ control_point_count = normaliser->output ? program->output_control_point_count
|
|
+ : program->input_control_point_count;
|
|
+
|
|
+ for (unsigned int i = 0; i < reg->idx_count; ++i)
|
|
+ {
|
|
+ if (reg->idx[i].rel_addr)
|
|
+ sysval_array_normaliser_map_register(normaliser, it,
|
|
+ ®->idx[i].rel_addr->reg, ®->idx[i].rel_addr->swizzle);
|
|
+ }
|
|
+
|
|
+ if (normaliser->output && reg->type != VKD3DSPR_OUTPUT)
|
|
+ return VKD3D_OK;
|
|
+ if (!normaliser->output && reg->type != VKD3DSPR_INPUT)
|
|
+ return VKD3D_OK;
|
|
+
|
|
+ element_index = reg->idx[reg->idx_count - 1].offset;
|
|
+ element = &signature->elements[element_index];
|
|
+ if (element->sysval_semantic != normaliser->sysval_semantic)
|
|
+ return VKD3D_OK;
|
|
+
|
|
+ for (q = 0; q < normaliser->reg_count; ++q)
|
|
+ {
|
|
+ if (normaliser->regs[q].index == element->register_index)
|
|
+ break;
|
|
+ }
|
|
+ VKD3D_ASSERT(q < normaliser->reg_count);
|
|
+
|
|
+ if (normaliser->output && normaliser->phase == VSIR_OP_HS_CONTROL_POINT_PHASE)
|
|
+ {
|
|
+ if (!vsir_program_validate_outpointid_control_point_index(reg))
|
|
+ vkd3d_shader_error(normaliser->ctx->message_context, &loc, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
|
|
+ "Control point index of output source operand is not OUTPOINTID.\n");
|
|
+ }
|
|
+
|
|
+ if (control_point_count)
|
|
+ {
|
|
+ if (reg->idx_count == 3)
|
|
+ {
|
|
+ i_idx = reg->idx[0];
|
|
+ p_idx = reg->idx[1];
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ p_idx = reg->idx[0];
|
|
+ }
|
|
+ }
|
|
+ else if (reg->idx_count == 2)
|
|
+ {
|
|
+ i_idx = reg->idx[0];
|
|
+ }
|
|
+
|
|
+ reg->type = VKD3DSPR_IDXTEMP;
|
|
+ reg->idx[0].offset = normaliser->idxtemp_idx;
|
|
+ reg->idx[0].rel_addr = NULL;
|
|
+ reg->idx_count = 2;
|
|
+
|
|
+ if (p_idx.rel_addr && !(normaliser->output && normaliser->phase == VSIR_OP_HS_CONTROL_POINT_PHASE))
|
|
+ {
|
|
+ if (!(ssa_ins = vsir_program_iterator_insert_before_and_move(it, 1 + !!i_idx.rel_addr)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ if (!vsir_instruction_init_with_params(program, ssa_ins, &loc, VSIR_OP_IMUL_LOW, 1, 2))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ vsir_register_init(&ssa_ins->dst[0].reg, VKD3DSPR_SSA, VSIR_DATA_U32, 1);
|
|
+ ssa_ins->dst[0].reg.idx[0].offset = program->ssa_count++;
|
|
+ ssa_ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ ssa_ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ ssa_ins->src[0] = *p_idx.rel_addr;
|
|
+ vsir_src_operand_init_const_u32(&ssa_ins->src[1], normaliser->reg_count);
|
|
+
|
|
+ if (i_idx.rel_addr)
|
|
+ {
|
|
+ ssa_ins = vsir_program_iterator_next(it);
|
|
+ if (!vsir_instruction_init_with_params(program, ssa_ins, &loc, VSIR_OP_ADD, 1, 2))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+
|
|
+ vsir_register_init(&ssa_ins->dst[0].reg, VKD3DSPR_SSA, VSIR_DATA_U32, 1);
|
|
+ ssa_ins->dst[0].reg.idx[0].offset = program->ssa_count++;
|
|
+ ssa_ins->dst[0].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ ssa_ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ vsir_register_init(&ssa_ins->src[0].reg, VKD3DSPR_SSA, VSIR_DATA_U32, 1);
|
|
+ ssa_ins->src[0].reg.idx[0].offset = program->ssa_count - 2;
|
|
+ ssa_ins->src[1] = *i_idx.rel_addr;
|
|
+ }
|
|
+
|
|
+ vsir_program_iterator_next(it);
|
|
+
|
|
+ reg->idx[1].offset = normaliser->reg_count * p_idx.offset + i_idx.offset + q;
|
|
+ if (!(reg->idx[1].rel_addr = vsir_program_get_src_operands(program, 1)))
|
|
+ return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ vsir_register_init(®->idx[1].rel_addr->reg, VKD3DSPR_SSA, VSIR_DATA_U32, 1);
|
|
+ reg->idx[1].rel_addr->reg.idx[0].offset = program->ssa_count - 1;
|
|
+ reg->idx[1].rel_addr->reg.dimension = VSIR_DIMENSION_VEC4;
|
|
+ reg->idx[1].rel_addr->swizzle = VKD3D_SHADER_SWIZZLE_X;
|
|
+ reg->idx[1].rel_addr->modifiers = 0;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ reg->idx[1].offset = normaliser->reg_count * p_idx.offset + i_idx.offset + q;
|
|
+ reg->idx[1].rel_addr = i_idx.rel_addr;
|
|
+ }
|
|
+
|
|
+ if (src_swizzle)
|
|
+ *src_swizzle = vsir_combine_swizzles(vsir_swizzle_from_writemask(element->mask), *src_swizzle);
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+static enum vkd3d_result sysval_array_normaliser_map_instruction(
|
|
+ struct sysval_array_normaliser *normaliser, struct vsir_program_iterator *it)
|
|
+{
|
|
+ struct vkd3d_shader_instruction *ins = vsir_program_iterator_current(it);
|
|
+ unsigned int src_count, dst_count;
|
|
+ enum vkd3d_result res;
|
|
+
|
|
+ if (vsir_instruction_is_dcl(ins))
|
|
+ return VKD3D_OK;
|
|
+
|
|
+ dst_count = ins->dst_count;
|
|
+ src_count = ins->src_count;
|
|
+
|
|
+ for (unsigned int k = 0; k < dst_count; ++k)
|
|
+ {
|
|
+ ins = vsir_program_iterator_current(it);
|
|
+ if ((res = sysval_array_normaliser_map_register(normaliser, it, &ins->dst[k].reg, NULL)))
|
|
+ return res;
|
|
+ }
|
|
+
|
|
+ for (unsigned int k = 0; k < src_count; ++k)
|
|
+ {
|
|
+ ins = vsir_program_iterator_current(it);
|
|
+ if ((res = sysval_array_normaliser_map_register(normaliser, it, &ins->src[k].reg, &ins->src[k].swizzle)))
|
|
+ return res;
|
|
+ }
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+static void shader_register_remove_signature_element(struct vkd3d_shader_register *reg,
|
|
+ enum vkd3d_shader_register_type type, unsigned int index)
|
|
+{
|
|
+ unsigned int current_idx;
|
|
+
|
|
+ for (unsigned int i = 0; i < reg->idx_count; ++i)
|
|
+ {
|
|
+ if (reg->idx[i].rel_addr)
|
|
+ shader_register_remove_signature_element(®->idx[i].rel_addr->reg, type, index);
|
|
+ }
|
|
+
|
|
+ if (reg->type != type)
|
|
+ return;
|
|
+
|
|
+ VKD3D_ASSERT(!reg->idx[reg->idx_count - 1].rel_addr);
|
|
+ current_idx = reg->idx[reg->idx_count - 1].offset;
|
|
+ VKD3D_ASSERT(current_idx != index);
|
|
+ if (current_idx > index)
|
|
+ --reg->idx[reg->idx_count - 1].offset;
|
|
+}
|
|
+
|
|
+static void vsir_program_remove_signature_element(struct vsir_program *program,
|
|
+ enum vkd3d_shader_register_type type, unsigned int index)
|
|
+{
|
|
+ struct vsir_program_iterator it = vsir_program_iterator(&program->instructions);
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct shader_signature *signature;
|
|
+
|
|
+ switch (type)
|
|
+ {
|
|
+ case VKD3DSPR_INPUT:
|
|
+ signature = &program->input_signature;
|
|
+ break;
|
|
+ case VKD3DSPR_OUTPUT:
|
|
+ signature = &program->output_signature;
|
|
+ break;
|
|
+ case VKD3DSPR_PATCHCONST:
|
|
+ signature = &program->patch_constant_signature;
|
|
+ break;
|
|
+ default:
|
|
+ vkd3d_unreachable();
|
|
+ }
|
|
+
|
|
+ for (ins = vsir_program_iterator_head(&it); ins; ins = vsir_program_iterator_next(&it))
|
|
+ {
|
|
+ if (vsir_instruction_is_dcl(ins))
|
|
+ continue;
|
|
+ for (unsigned int i = 0; i < ins->dst_count; ++i)
|
|
+ shader_register_remove_signature_element(&ins->dst[i].reg, type, index);
|
|
+ for (unsigned int i = 0; i < ins->src_count; ++i)
|
|
+ shader_register_remove_signature_element(&ins->src[i].reg, type, index);
|
|
+ }
|
|
+
|
|
+ memmove(&signature->elements[index], &signature->elements[index + 1],
|
|
+ sizeof(*signature->elements) * (signature->element_count - 1 - index));
|
|
+ --signature->element_count;
|
|
+}
|
|
+
|
|
+static void sysval_array_normaliser_remove_old_signature_elements(struct sysval_array_normaliser *normaliser)
|
|
+{
|
|
+ struct vsir_program *program = normaliser->ctx->program;
|
|
+ enum vkd3d_shader_register_type type;
|
|
+ struct shader_signature *signature;
|
|
+ struct signature_element *element;
|
|
+
|
|
+ signature = normaliser->output ? &program->output_signature : &program->input_signature;
|
|
+ type = normaliser->output ? VKD3DSPR_OUTPUT : VKD3DSPR_INPUT;
|
|
+
|
|
+ for (int i = signature->element_count - 2; i >= 0; --i)
|
|
+ {
|
|
+ element = &signature->elements[i];
|
|
+ if (element->sysval_semantic != normaliser->sysval_semantic)
|
|
+ continue;
|
|
+ TRACE("Removing %s signature element index %u.\n", normaliser->output ? "output" : "input", i);
|
|
+ vsir_program_remove_signature_element(program, type, i);
|
|
+ }
|
|
+}
|
|
+
|
|
+static enum vkd3d_result vsir_program_normalise_sysval_array(struct vsir_transformation_context *ctx,
|
|
+ const char *semantic_name, enum vkd3d_shader_sysval_semantic sysval_semantic, bool output)
|
|
+{
|
|
+ struct vsir_program *program = ctx->program;
|
|
+ struct sysval_array_normaliser normaliser;
|
|
+ struct vkd3d_shader_instruction *ins;
|
|
+ struct vsir_program_iterator it;
|
|
+ bool declarations = true;
|
|
+ enum vkd3d_result res;
|
|
+
|
|
+ if ((res = sysval_array_normaliser_init(ctx, semantic_name, sysval_semantic, output, &normaliser)) < 0)
|
|
+ return res;
|
|
+
|
|
+ if (!normaliser.reg_count)
|
|
+ return VKD3D_OK;
|
|
+
|
|
+ if (!output && program->shader_version.type == VKD3D_SHADER_TYPE_VERTEX)
|
|
+ return VKD3D_OK;
|
|
+
|
|
+ if (TRACE_ON())
|
|
+ vsir_program_trace(program);
|
|
+
|
|
+ it = vsir_program_iterator(&program->instructions);
|
|
+ for (ins = vsir_program_iterator_head(&it); ins; ins = vsir_program_iterator_next(&it))
|
|
+ {
|
|
+ if (ins->opcode == VSIR_OP_HS_DECLS || ins->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE
|
|
+ || ins->opcode == VSIR_OP_HS_FORK_PHASE || ins->opcode == VSIR_OP_HS_JOIN_PHASE)
|
|
+ {
|
|
+ normaliser.phase = ins->opcode;
|
|
+ declarations = true;
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ if (declarations && !vsir_instruction_is_dcl(ins) && ins->opcode != VSIR_OP_NOP)
|
|
+ {
|
|
+ unsigned int idxtemp_idx = vsir_program_get_idxtemp_count(program) + 1;
|
|
+
|
|
+ declarations = false;
|
|
+
|
|
+ if ((res = sysval_array_normaliser_dcl_indexable_temp(&normaliser, &it, idxtemp_idx)) < 0)
|
|
+ return res;
|
|
+
|
|
+ if (vsir_program_iterator_current(&it)->opcode == VSIR_OP_LABEL)
|
|
+ ins = vsir_program_iterator_next(&it);
|
|
+
|
|
+ if ((!output || vsir_opcode_is_fork_or_join_phase(normaliser.phase))
|
|
+ && (res = sysval_array_normaliser_add_input_copy(&normaliser, &it)) < 0)
|
|
+ return res;
|
|
+ }
|
|
+
|
|
+ if (!declarations)
|
|
+ {
|
|
+ if (ins->opcode == VSIR_OP_RET || ins->opcode == VSIR_OP_EMIT || ins->opcode == VSIR_OP_EMIT_STREAM)
|
|
+ {
|
|
+ if ((output && !vsir_opcode_is_fork_or_join_phase(normaliser.phase))
|
|
+ && (res = sysval_array_normaliser_add_output_copy(&normaliser, &it)) < 0)
|
|
+ return res;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ if ((res = sysval_array_normaliser_map_instruction(&normaliser, &it)) < 0)
|
|
+ return res;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ VKD3D_ASSERT(!declarations);
|
|
+ if (TRACE_ON())
|
|
+ vsir_program_trace(program);
|
|
+ sysval_array_normaliser_remove_old_signature_elements(&normaliser);
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
+/* This pass transforms clip/cull system values from the Direct3D convention of
|
|
+ * 2 4-component registers, into the SPIR-V/GLSL convention of 8-element
|
|
+ * scalar float arrays. */
|
|
+static enum vkd3d_result vsir_program_normalise_clip_cull(
|
|
+ struct vsir_program *program, struct vsir_transformation_context *ctx)
|
|
+{
|
|
+ enum vkd3d_result res;
|
|
+
|
|
+ if ((res = vsir_program_normalise_sysval_array(ctx, "SV_ClipDistance", VKD3D_SHADER_SV_CLIP_DISTANCE, false)) < 0)
|
|
+ return res;
|
|
+ if ((res = vsir_program_normalise_sysval_array(ctx, "SV_ClipDistance", VKD3D_SHADER_SV_CLIP_DISTANCE, true)) < 0)
|
|
+ return res;
|
|
+ if ((res = vsir_program_normalise_sysval_array(ctx, "SV_CullDistance", VKD3D_SHADER_SV_CULL_DISTANCE, false)) < 0)
|
|
+ return res;
|
|
+ if ((res = vsir_program_normalise_sysval_array(ctx, "SV_CullDistance", VKD3D_SHADER_SV_CULL_DISTANCE, true)) < 0)
|
|
+ return res;
|
|
+
|
|
+ program->normalisation_flags.normalised_clip_cull_arrays = true;
|
|
+
|
|
+ return VKD3D_OK;
|
|
+}
|
|
+
|
|
static bool is_pre_rasterization_shader(enum vkd3d_shader_type type)
|
|
{
|
|
return type == VKD3D_SHADER_TYPE_VERTEX
|
|
@@ -8134,9 +9082,9 @@ static enum vkd3d_result insert_point_size_before_ret(struct vsir_program *progr
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
|
|
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
ins->dst[0].reg.idx[0].offset = VSIR_RASTOUT_POINT_SIZE;
|
|
- src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE, VSIR_DATA_F32);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
return VKD3D_OK;
|
|
@@ -8243,12 +9191,12 @@ static enum vkd3d_result vsir_program_insert_point_size_clamp(struct vsir_progra
|
|
|
|
for (size_t j = 0; j < ins->dst_count; ++j)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst = &ins->dst[j];
|
|
+ struct vsir_dst_operand *dst = &ins->dst[j];
|
|
|
|
/* Note we run after I/O normalization. */
|
|
if (dst->reg.type == VKD3DSPR_RASTOUT)
|
|
{
|
|
- dst_param_init_ssa_float(dst, program->ssa_count);
|
|
+ vsir_dst_operand_init_ssa_f32(dst, program->ssa_count);
|
|
ssa_value = program->ssa_count++;
|
|
clamp = true;
|
|
}
|
|
@@ -8266,16 +9214,16 @@ static enum vkd3d_result vsir_program_insert_point_size_clamp(struct vsir_progra
|
|
if (min_parameter)
|
|
{
|
|
vsir_instruction_init_with_params(program, ins, loc, VSIR_OP_MAX, 1, 2);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_value);
|
|
- src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE_MIN, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_value);
|
|
+ vsir_src_operand_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE_MIN, VSIR_DATA_F32);
|
|
if (max_parameter)
|
|
{
|
|
- dst_param_init_ssa_float(&ins->dst[0], program->ssa_count);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], program->ssa_count);
|
|
ssa_value = program->ssa_count++;
|
|
}
|
|
else
|
|
{
|
|
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
ins->dst[0].reg.idx[0].offset = VSIR_RASTOUT_POINT_SIZE;
|
|
}
|
|
ins = vsir_program_iterator_next(&it);
|
|
@@ -8284,9 +9232,9 @@ static enum vkd3d_result vsir_program_insert_point_size_clamp(struct vsir_progra
|
|
if (max_parameter)
|
|
{
|
|
vsir_instruction_init_with_params(program, ins, loc, VSIR_OP_MIN, 1, 2);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_value);
|
|
- src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE_MAX, VSIR_DATA_F32);
|
|
- vsir_dst_param_init(&ins->dst[0], VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_value);
|
|
+ vsir_src_operand_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_POINT_SIZE_MAX, VSIR_DATA_F32);
|
|
+ vsir_dst_operand_init(&ins->dst[0], VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
ins->dst[0].reg.idx[0].offset = VSIR_RASTOUT_POINT_SIZE;
|
|
ins = vsir_program_iterator_next(&it);
|
|
}
|
|
@@ -8307,7 +9255,7 @@ static bool has_texcoord_signature_element(const struct shader_signature *signat
|
|
|
|
/* Returns true if replacement was done. */
|
|
static bool replace_texcoord_with_point_coord(struct vsir_program *program,
|
|
- struct vkd3d_shader_src_param *src, unsigned int coord_temp)
|
|
+ struct vsir_src_operand *src, unsigned int coord_temp)
|
|
{
|
|
uint32_t prev_swizzle = src->swizzle;
|
|
const struct signature_element *e;
|
|
@@ -8458,17 +9406,17 @@ static enum vkd3d_result vsir_program_insert_point_coord(struct vsir_program *pr
|
|
return VKD3D_ERROR_OUT_OF_MEMORY;
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
|
|
- dst_param_init_temp_float4(&ins->dst[0], coord_temp);
|
|
+ vsir_dst_operand_init_temp_f32v4(&ins->dst[0], coord_temp);
|
|
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_0 | VKD3DSP_WRITEMASK_1;
|
|
- vsir_src_param_init(&ins->src[0], VKD3DSPR_POINT_COORD, VSIR_DATA_F32, 0);
|
|
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_POINT_COORD, VSIR_DATA_F32, 0);
|
|
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
ins = vsir_program_iterator_next(&it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
|
|
- dst_param_init_temp_float4(&ins->dst[0], coord_temp);
|
|
+ vsir_dst_operand_init_temp_f32v4(&ins->dst[0], coord_temp);
|
|
ins->dst[0].write_mask = VKD3DSP_WRITEMASK_2 | VKD3DSP_WRITEMASK_3;
|
|
- vsir_src_param_init(&ins->src[0], VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
|
|
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_IMMCONST, VSIR_DATA_F32, 0);
|
|
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
ins->src[0].swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
vsir_program_iterator_next(&it);
|
|
@@ -8532,23 +9480,23 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
|
|
ssa_temp2 = program->ssa_count++;
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_NEG, 1, 1);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp);
|
|
- vsir_src_param_init(&ins->src[0], VKD3DSPR_INPUT, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp);
|
|
+ vsir_src_operand_init(&ins->src[0], VKD3DSPR_INPUT, VSIR_DATA_F32, 1);
|
|
ins->src[0].reg.idx[0].offset = fog_signature_idx;
|
|
ins->src[0].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
ins->src[0].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_ADD, 1, 2);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp2);
|
|
- src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_END, VSIR_DATA_F32);
|
|
- src_param_init_ssa_float(&ins->src[1], ssa_temp);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp2);
|
|
+ vsir_src_operand_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_END, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[1], ssa_temp);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MUL, 1, 2);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_factor);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_temp2);
|
|
- src_param_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VSIR_DATA_F32);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_factor);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_temp2);
|
|
+ vsir_src_operand_init_parameter(&ins->src[1], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VSIR_DATA_F32);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
break;
|
|
@@ -8567,22 +9515,22 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
|
|
ssa_temp2 = program->ssa_count++;
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MUL, 1, 2);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp);
|
|
- src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VSIR_DATA_F32);
|
|
- vsir_src_param_init(&ins->src[1], VKD3DSPR_INPUT, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp);
|
|
+ vsir_src_operand_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init(&ins->src[1], VKD3DSPR_INPUT, VSIR_DATA_F32, 1);
|
|
ins->src[1].reg.idx[0].offset = fog_signature_idx;
|
|
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_NEG, 1, 1);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp2);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_temp);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp2);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_temp);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_EXP, 1, 1);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_factor);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_temp2);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_factor);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_temp2);
|
|
ins = vsir_program_iterator_next(it);
|
|
break;
|
|
|
|
@@ -8602,28 +9550,28 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
|
|
ssa_temp3 = program->ssa_count++;
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MUL, 1, 2);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp);
|
|
- src_param_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VSIR_DATA_F32);
|
|
- vsir_src_param_init(&ins->src[1], VKD3DSPR_INPUT, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp);
|
|
+ vsir_src_operand_init_parameter(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_SCALE, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init(&ins->src[1], VKD3DSPR_INPUT, VSIR_DATA_F32, 1);
|
|
ins->src[1].reg.idx[0].offset = fog_signature_idx;
|
|
ins->src[1].reg.dimension = VSIR_DIMENSION_VEC4;
|
|
ins->src[1].swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MUL, 1, 2);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp2);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_temp);
|
|
- src_param_init_ssa_float(&ins->src[1], ssa_temp);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp2);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_temp);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[1], ssa_temp);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_NEG, 1, 1);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp3);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_temp2);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp3);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_temp2);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_EXP, 1, 1);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_factor);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_temp3);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_factor);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_temp3);
|
|
ins = vsir_program_iterator_next(it);
|
|
break;
|
|
|
|
@@ -8643,27 +9591,27 @@ static enum vkd3d_result insert_fragment_fog_before_ret(struct vsir_program *pro
|
|
ssa_temp3 = program->ssa_count++;
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_NEG, 1, 1);
|
|
- dst_param_init_ssa_float4(&ins->dst[0], ssa_temp);
|
|
- src_param_init_parameter_vec4(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_COLOUR, VSIR_DATA_F32);
|
|
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_temp);
|
|
+ vsir_src_operand_init_parameter_vec4(&ins->src[0], VKD3D_SHADER_PARAMETER_NAME_FOG_COLOUR, VSIR_DATA_F32);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_ADD, 1, 2);
|
|
- dst_param_init_ssa_float4(&ins->dst[0], ssa_temp2);
|
|
- src_param_init_temp_float4(&ins->src[0], colour_temp);
|
|
- src_param_init_ssa_float4(&ins->src[1], ssa_temp);
|
|
+ vsir_dst_operand_init_ssa_f32v4(&ins->dst[0], ssa_temp2);
|
|
+ vsir_src_operand_init_temp_f32v4(&ins->src[0], colour_temp);
|
|
+ vsir_src_operand_init_ssa_f32v4(&ins->src[1], ssa_temp);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_SATURATE, 1, 1);
|
|
- dst_param_init_ssa_float(&ins->dst[0], ssa_temp3);
|
|
- src_param_init_ssa_float(&ins->src[0], ssa_factor);
|
|
+ vsir_dst_operand_init_ssa_f32(&ins->dst[0], ssa_temp3);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[0], ssa_factor);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MAD, 1, 3);
|
|
- dst_param_init_output(&ins->dst[0], VSIR_DATA_F32, colour_signature_idx,
|
|
+ vsir_dst_operand_init_output(&ins->dst[0], VSIR_DATA_F32, colour_signature_idx,
|
|
program->output_signature.elements[colour_signature_idx].mask);
|
|
- src_param_init_ssa_float4(&ins->src[0], ssa_temp2);
|
|
- src_param_init_ssa_float(&ins->src[1], ssa_temp3);
|
|
- src_param_init_parameter_vec4(&ins->src[2], VKD3D_SHADER_PARAMETER_NAME_FOG_COLOUR, VSIR_DATA_F32);
|
|
+ vsir_src_operand_init_ssa_f32v4(&ins->src[0], ssa_temp2);
|
|
+ vsir_src_operand_init_ssa_f32(&ins->src[1], ssa_temp3);
|
|
+ vsir_src_operand_init_parameter_vec4(&ins->src[2], VKD3D_SHADER_PARAMETER_NAME_FOG_COLOUR, VSIR_DATA_F32);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
return VKD3D_OK;
|
|
@@ -8734,7 +9682,7 @@ static enum vkd3d_result vsir_program_insert_fragment_fog(struct vsir_program *p
|
|
|
|
for (size_t j = 0; j < ins->dst_count; ++j)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst = &ins->dst[j];
|
|
+ struct vsir_dst_operand *dst = &ins->dst[j];
|
|
|
|
/* Note we run after I/O normalization. */
|
|
if (dst->reg.type == VKD3DSPR_OUTPUT && dst->reg.idx[0].offset == colour_signature_idx)
|
|
@@ -8797,8 +9745,8 @@ static enum vkd3d_result insert_vertex_fog_before_ret(struct vsir_program *progr
|
|
|
|
/* Write the fog output. */
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
|
|
- dst_param_init_output(&ins->dst[0], VSIR_DATA_F32, fog_signature_idx, 0x1);
|
|
- src_param_init_temp_float4(&ins->src[0], temp);
|
|
+ vsir_dst_operand_init_output(&ins->dst[0], VSIR_DATA_F32, fog_signature_idx, 0x1);
|
|
+ vsir_src_operand_init_temp_f32v4(&ins->src[0], temp);
|
|
if (source == VKD3D_SHADER_FOG_SOURCE_Z)
|
|
ins->src[0].swizzle = VKD3D_SHADER_SWIZZLE(Z, Z, Z, Z);
|
|
else /* Position or specular W. */
|
|
@@ -8807,9 +9755,9 @@ static enum vkd3d_result insert_vertex_fog_before_ret(struct vsir_program *progr
|
|
|
|
/* Write the position or specular output. */
|
|
vsir_instruction_init_with_params(program, ins, &loc, VSIR_OP_MOV, 1, 1);
|
|
- dst_param_init_output(&ins->dst[0], vsir_data_type_from_component_type(e->component_type),
|
|
- source_signature_idx, e->mask);
|
|
- src_param_init_temp_float4(&ins->src[0], temp);
|
|
+ vsir_dst_operand_init_output(&ins->dst[0],
|
|
+ vsir_data_type_from_component_type(e->component_type), source_signature_idx, e->mask);
|
|
+ vsir_src_operand_init_temp_f32v4(&ins->src[0], temp);
|
|
ins = vsir_program_iterator_next(it);
|
|
|
|
return VKD3D_OK;
|
|
@@ -8896,7 +9844,7 @@ static enum vkd3d_result vsir_program_insert_vertex_fog(struct vsir_program *pro
|
|
|
|
for (size_t j = 0; j < ins->dst_count; ++j)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst = &ins->dst[j];
|
|
+ struct vsir_dst_operand *dst = &ins->dst[j];
|
|
|
|
/* Note we run after I/O normalization. */
|
|
if (dst->reg.type == VKD3DSPR_OUTPUT && dst->reg.idx[0].offset == source_signature_idx)
|
|
@@ -9303,8 +10251,7 @@ struct liveness_tracker
|
|
} *ssa_regs, *temp_regs;
|
|
};
|
|
|
|
-static void liveness_track_src(struct liveness_tracker *tracker,
|
|
- struct vkd3d_shader_src_param *src, unsigned int index)
|
|
+static void liveness_track_src(struct liveness_tracker *tracker, struct vsir_src_operand *src, unsigned int index)
|
|
{
|
|
for (unsigned int k = 0; k < src->reg.idx_count; ++k)
|
|
{
|
|
@@ -9324,7 +10271,7 @@ static void liveness_track_src(struct liveness_tracker *tracker,
|
|
}
|
|
}
|
|
|
|
-static void liveness_track_dst(struct liveness_tracker *tracker, struct vkd3d_shader_dst_param *dst,
|
|
+static void liveness_track_dst(struct liveness_tracker *tracker, struct vsir_dst_operand *dst,
|
|
unsigned int index, const struct vkd3d_shader_version *version, enum vkd3d_shader_opcode opcode)
|
|
{
|
|
struct liveness_tracker_reg *reg;
|
|
@@ -9522,7 +10469,7 @@ struct temp_allocator
|
|
bool ps_1_x;
|
|
};
|
|
|
|
-static void temp_allocator_set_src(struct temp_allocator *allocator, struct vkd3d_shader_src_param *src)
|
|
+static void temp_allocator_set_src(struct temp_allocator *allocator, struct vsir_src_operand *src)
|
|
{
|
|
struct temp_allocator_reg *reg;
|
|
|
|
@@ -9568,7 +10515,7 @@ static uint32_t vsir_map_swizzle(uint32_t swizzle, unsigned int writemask)
|
|
return ret;
|
|
}
|
|
|
|
-static void vsir_remap_immconst(struct vkd3d_shader_src_param *src, unsigned int writemask)
|
|
+static void vsir_remap_immconst(struct vsir_src_operand *src, unsigned int writemask)
|
|
{
|
|
union vsir_immediate_constant prev = src->reg.u;
|
|
unsigned int src_component = 0;
|
|
@@ -9580,7 +10527,7 @@ static void vsir_remap_immconst(struct vkd3d_shader_src_param *src, unsigned int
|
|
}
|
|
}
|
|
|
|
-static void vsir_remap_immconst64(struct vkd3d_shader_src_param *src, unsigned int writemask)
|
|
+static void vsir_remap_immconst64(struct vsir_src_operand *src, unsigned int writemask)
|
|
{
|
|
if (writemask == (VKD3DSP_WRITEMASK_2 | VKD3DSP_WRITEMASK_3))
src->reg.u.immconst_u64[1] = src->reg.u.immconst_u64[0];
@@ -9615,7 +10562,7 @@ static bool vsir_opcode_is_double(enum vkd3d_shader_opcode opcode)
}
static void temp_allocator_set_dst(struct temp_allocator *allocator,
- struct vkd3d_shader_dst_param *dst, const struct vkd3d_shader_instruction *ins)
+ struct vsir_dst_operand *dst, const struct vkd3d_shader_instruction *ins)
{
struct temp_allocator_reg *reg;
uint32_t remapped_mask;
@@ -9659,7 +10606,7 @@ static void temp_allocator_set_dst(struct temp_allocator *allocator,
for (unsigned int i = 0; i < ins->src_count; ++i)
{
- struct vkd3d_shader_src_param *src = &ins->src[i];
+ struct vsir_src_operand *src = &ins->src[i];
if (vsir_src_is_masked(ins->opcode, i))
{
@@ -10445,8 +11392,7 @@ static void vsir_validate_io_register(struct validation_context *ctx, const stru
}
element = &signature->elements[signature_idx];
- if (element->register_count > 1 || vsir_sysval_semantic_is_tess_factor(element->sysval_semantic))
- is_array = true;
+ is_array = vsir_signature_element_is_array(element, &ctx->program->normalisation_flags);
expected_idx_count = 1 + !!has_control_point + !!is_array;
control_point_index = !!is_array;
@@ -10639,7 +11585,7 @@ static void vsir_validate_descriptor_indices(struct validation_context *ctx,
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_INDEX,
"Non-NULL indirect address for the ID of a register of type \"%s\".", name);
- if (!ctx->program->has_descriptor_info)
+ if (!ctx->program->normalisation_flags.has_descriptor_info)
return;
if (!(descriptor = vkd3d_shader_find_descriptor(&ctx->program->descriptors, type, reg->idx[0].offset)))
@@ -10817,8 +11763,7 @@ static void vsir_validate_ssa_register(struct validation_context *ctx,
}
}
-static void vsir_validate_src_param(struct validation_context *ctx,
- const struct vkd3d_shader_src_param *src);
+static void vsir_validate_src_operand(struct validation_context *ctx, const struct vsir_src_operand *src);
static void vsir_validate_register(struct validation_context *ctx,
const struct vkd3d_shader_register *reg)
@@ -10871,12 +11816,13 @@ static void vsir_validate_register(struct validation_context *ctx,
for (i = 0; i < min(reg->idx_count, ARRAY_SIZE(reg->idx)); ++i)
{
- const struct vkd3d_shader_src_param *param = reg->idx[i].rel_addr;
- if (param)
+ const struct vsir_src_operand *src;
+
+ if ((src = reg->idx[i].rel_addr))
{
- vsir_validate_src_param(ctx, param);
+ vsir_validate_src_operand(ctx, src);
- switch (param->reg.type)
+ switch (src->reg.type)
{
case VKD3DSPR_TEMP:
case VKD3DSPR_SSA:
@@ -10888,7 +11834,7 @@ static void vsir_validate_register(struct validation_context *ctx,
default:
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_REGISTER_TYPE,
"Invalid register type %#x for a relative address parameter.",
- param->reg.type);
+ src->reg.type);
break;
}
}
@@ -11011,8 +11957,7 @@ static void vsir_validate_register(struct validation_context *ctx,
reg->dimension, reg->type, validation_data->dimension);
}
-static void vsir_validate_io_dst_param(struct validation_context *ctx,
- const struct vkd3d_shader_dst_param *dst)
+static void vsir_validate_io_dst_operand(struct validation_context *ctx, const struct vsir_dst_operand *dst)
{
struct vsir_io_register_data io_reg_data;
const struct signature_element *e;
@@ -11045,8 +11990,7 @@ static void vsir_validate_io_dst_param(struct validation_context *ctx,
}
}
-static void vsir_validate_dst_param(struct validation_context *ctx,
- const struct vkd3d_shader_dst_param *dst)
+static void vsir_validate_dst_operand(struct validation_context *ctx, const struct vsir_dst_operand *dst)
{
const struct vkd3d_shader_version *version = &ctx->program->shader_version;
@@ -11076,7 +12020,7 @@ static void vsir_validate_dst_param(struct validation_context *ctx,
break;
}
- if (dst->modifiers & ~VKD3DSPDM_MASK || (ctx->program->has_no_modifiers && dst->modifiers))
+ if (dst->modifiers & ~VKD3DSPDM_MASK || (ctx->program->normalisation_flags.has_no_modifiers && dst->modifiers))
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_MODIFIERS, "Destination has invalid modifiers %#x.",
dst->modifiers);
@@ -11160,23 +12104,23 @@ static void vsir_validate_dst_param(struct validation_context *ctx,
break;
case VKD3DSPR_INPUT:
|
|
- vsir_validate_io_dst_param(ctx, dst);
|
|
+ vsir_validate_io_dst_operand(ctx, dst);
|
|
break;
|
|
|
|
case VKD3DSPR_OUTPUT:
|
|
- vsir_validate_io_dst_param(ctx, dst);
|
|
+ vsir_validate_io_dst_operand(ctx, dst);
|
|
break;
|
|
|
|
case VKD3DSPR_INCONTROLPOINT:
|
|
- vsir_validate_io_dst_param(ctx, dst);
|
|
+ vsir_validate_io_dst_operand(ctx, dst);
|
|
break;
|
|
|
|
case VKD3DSPR_OUTCONTROLPOINT:
|
|
- vsir_validate_io_dst_param(ctx, dst);
|
|
+ vsir_validate_io_dst_operand(ctx, dst);
|
|
break;
|
|
|
|
case VKD3DSPR_PATCHCONST:
|
|
- vsir_validate_io_dst_param(ctx, dst);
|
|
+ vsir_validate_io_dst_operand(ctx, dst);
|
|
break;
|
|
|
|
case VKD3DSPR_TEXTURE:
|
|
@@ -11189,8 +12133,7 @@ static void vsir_validate_dst_param(struct validation_context *ctx,
|
|
}
|
|
}
|
|
|
|
-static void vsir_validate_io_src_param(struct validation_context *ctx,
|
|
- const struct vkd3d_shader_src_param *src)
|
|
+static void vsir_validate_io_src_operand(struct validation_context *ctx, const struct vsir_src_operand *src)
|
|
{
|
|
struct vsir_io_register_data io_reg_data;
|
|
|
|
@@ -11209,8 +12152,7 @@ static void vsir_validate_io_src_param(struct validation_context *ctx,
|
|
#define U32_BIT (1u << VSIR_DATA_U32)
|
|
#define U16_BIT (1u << VSIR_DATA_U16)
|
|
|
|
-static void vsir_validate_src_param(struct validation_context *ctx,
|
|
- const struct vkd3d_shader_src_param *src)
|
|
+static void vsir_validate_src_operand(struct validation_context *ctx, const struct vsir_src_operand *src)
|
|
{
|
|
static const struct
|
|
{
|
|
@@ -11257,7 +12199,7 @@ static void vsir_validate_src_param(struct validation_context *ctx,
|
|
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_SWIZZLE,
|
|
"Immediate constant source has invalid swizzle %#x.", src->swizzle);
|
|
|
|
- if (src->modifiers >= VKD3DSPSM_COUNT || (ctx->program->has_no_modifiers && src->modifiers))
|
|
+ if (src->modifiers >= VKD3DSPSM_COUNT || (ctx->program->normalisation_flags.has_no_modifiers && src->modifiers))
|
|
validator_error(ctx, VKD3D_SHADER_ERROR_VSIR_INVALID_MODIFIERS, "Source has invalid modifiers %#x.",
|
|
src->modifiers);
|
|
|
|
@@ -11291,23 +12233,23 @@ static void vsir_validate_src_param(struct validation_context *ctx,
|
|
break;
|
|
|
|
case VKD3DSPR_INPUT:
|
|
- vsir_validate_io_src_param(ctx, src);
|
|
+ vsir_validate_io_src_operand(ctx, src);
|
|
break;
|
|
|
|
case VKD3DSPR_OUTPUT:
|
|
- vsir_validate_io_src_param(ctx, src);
|
|
+ vsir_validate_io_src_operand(ctx, src);
|
|
break;
|
|
|
|
case VKD3DSPR_INCONTROLPOINT:
|
|
- vsir_validate_io_src_param(ctx, src);
|
|
+ vsir_validate_io_src_operand(ctx, src);
|
|
break;
|
|
|
|
case VKD3DSPR_OUTCONTROLPOINT:
|
|
- vsir_validate_io_src_param(ctx, src);
|
|
+ vsir_validate_io_src_operand(ctx, src);
|
|
break;
|
|
|
|
case VKD3DSPR_PATCHCONST:
|
|
- vsir_validate_io_src_param(ctx, src);
|
|
+ vsir_validate_io_src_operand(ctx, src);
|
|
break;
|
|
|
|
default:
|
|
@@ -13157,10 +14099,14 @@ static void vsir_validate_instruction(struct validation_context *ctx,
|
|
size_t i;
|
|
|
|
for (i = 0; i < instruction->dst_count; ++i)
|
|
- vsir_validate_dst_param(ctx, &instruction->dst[i]);
|
|
+ {
|
|
+ vsir_validate_dst_operand(ctx, &instruction->dst[i]);
|
|
+ }
|
|
|
|
for (i = 0; i < instruction->src_count; ++i)
|
|
- vsir_validate_src_param(ctx, &instruction->src[i]);
|
|
+ {
|
|
+ vsir_validate_src_operand(ctx, &instruction->src[i]);
|
|
+ }
|
|
|
|
if (instruction->opcode >= VSIR_OP_INVALID)
|
|
{
|
|
@@ -13791,18 +14737,18 @@ static enum vkd3d_result vsir_program_dce(struct vsir_program *program,
|
|
|
|
for (unsigned int j = 0; j < ins->dst_count; ++j)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst = &ins->dst[j];
|
|
+ struct vsir_dst_operand *dst = &ins->dst[j];
|
|
|
|
if (dst->reg.type == VKD3DSPR_SSA && !tracker.ssa_regs[dst->reg.idx[0].offset].last_read)
|
|
{
|
|
- vsir_dst_param_init_null(dst);
|
|
+ vsir_dst_operand_init_null(dst);
|
|
ctx->progress = true;
|
|
}
|
|
else if (dst->reg.type == VKD3DSPR_TEMP
|
|
&& tracker.temp_regs[dst->reg.idx[0].offset].last_read <= i
|
|
&& !(program->shader_version.major == 1 && dst->reg.idx[0].offset == 0))
|
|
{
|
|
- vsir_dst_param_init_null(dst);
|
|
+ vsir_dst_operand_init_null(dst);
|
|
ctx->progress = true;
|
|
}
|
|
else if (dst->reg.type != VKD3DSPR_NULL)
|
|
@@ -13985,9 +14931,9 @@ static enum vkd3d_result vsir_program_copy_propagation(struct vsir_program *prog
|
|
{
|
|
for (unsigned int j = 0; j < ins->src_count; ++j)
|
|
{
|
|
- struct vkd3d_shader_src_param *src = &ins->src[j];
|
|
- const struct vkd3d_shader_src_param *mov_src;
|
|
+ struct vsir_src_operand *src = &ins->src[j];
|
|
const struct vkd3d_shader_instruction *mov;
|
|
+ const struct vsir_src_operand *mov_src;
|
|
enum vsir_data_type data_type;
|
|
uint32_t new_swizzle = 0;
|
|
|
|
@@ -14160,6 +15106,7 @@ enum vkd3d_result vsir_program_transform(struct vsir_program *program, uint64_t
|
|
vsir_transform(&ctx, vsir_program_apply_flat_interpolation);
|
|
vsir_transform(&ctx, vsir_program_insert_alpha_test);
|
|
vsir_transform(&ctx, vsir_program_insert_clip_planes);
|
|
+ vsir_transform(&ctx, vsir_program_normalise_clip_cull);
|
|
vsir_transform(&ctx, vsir_program_insert_point_size);
|
|
vsir_transform(&ctx, vsir_program_insert_point_size_clamp);
|
|
vsir_transform(&ctx, vsir_program_insert_point_coord);
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/msl.c b/libs/vkd3d/libs/vkd3d-shader/msl.c
|
|
index d34133d6d4c..2049871752c 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/msl.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/msl.c
|
|
@@ -34,7 +34,7 @@ struct msl_src
|
|
|
|
struct msl_dst
|
|
{
|
|
- const struct vkd3d_shader_dst_param *vsir;
|
|
+ const struct vsir_dst_operand *vsir;
|
|
struct vkd3d_string_buffer *register_name;
|
|
struct vkd3d_string_buffer *mask;
|
|
};
|
|
@@ -71,7 +71,7 @@ struct msl_resource_type_info
|
|
};
|
|
|
|
static void msl_print_subscript(struct vkd3d_string_buffer *buffer, struct msl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *rel_addr, unsigned int offset);
|
|
+ const struct vsir_src_operand *rel_addr, unsigned int offset);
|
|
|
|
static void VKD3D_PRINTF_FUNC(3, 4) msl_compiler_error(struct msl_generator *gen,
|
|
enum vkd3d_shader_error error, const char *fmt, ...)
|
|
@@ -554,7 +554,7 @@ static void msl_print_bitcast(struct vkd3d_string_buffer *dst, struct msl_genera
|
|
}
|
|
|
|
static void msl_print_src_with_type(struct vkd3d_string_buffer *buffer, struct msl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *vsir_src, uint32_t mask, enum vsir_data_type data_type)
|
|
+ const struct vsir_src_operand *vsir_src, uint32_t mask, enum vsir_data_type data_type)
|
|
{
|
|
const struct vkd3d_shader_register *reg = &vsir_src->reg;
|
|
struct vkd3d_string_buffer *register_name;
|
|
@@ -573,7 +573,7 @@ static void msl_print_src_with_type(struct vkd3d_string_buffer *buffer, struct m
|
|
}
|
|
|
|
static void msl_src_init(struct msl_src *msl_src, struct msl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *vsir_src, uint32_t mask)
|
|
+ const struct vsir_src_operand *vsir_src, uint32_t mask)
|
|
{
|
|
msl_src->str = vkd3d_string_buffer_get(&gen->string_buffers);
|
|
msl_print_src_with_type(msl_src->str, gen, vsir_src, mask, vsir_src->reg.data_type);
|
|
@@ -586,7 +586,7 @@ static void msl_dst_cleanup(struct msl_dst *dst, struct vkd3d_string_buffer_cach
|
|
}
|
|
|
|
static uint32_t msl_dst_init(struct msl_dst *msl_dst, struct msl_generator *gen,
|
|
- const struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_dst_param *vsir_dst)
|
|
+ const struct vkd3d_shader_instruction *ins, const struct vsir_dst_operand *vsir_dst)
|
|
{
|
|
uint32_t write_mask = vsir_dst->write_mask;
|
|
enum msl_data_type dst_data_type;
|
|
@@ -612,7 +612,7 @@ static uint32_t msl_dst_init(struct msl_dst *msl_dst, struct msl_generator *gen,
|
|
}
|
|
|
|
static void msl_print_subscript(struct vkd3d_string_buffer *buffer, struct msl_generator *gen,
|
|
- const struct vkd3d_shader_src_param *rel_addr, unsigned int offset)
|
|
+ const struct vsir_src_operand *rel_addr, unsigned int offset)
|
|
{
|
|
struct msl_src r;
|
|
|
|
@@ -803,7 +803,7 @@ static void msl_begin_block(struct msl_generator *gen)
|
|
}
|
|
|
|
static void msl_print_condition(struct vkd3d_string_buffer *buffer, struct msl_generator *gen,
|
|
- enum vkd3d_shader_conditional_op op, const struct vkd3d_shader_src_param *arg)
|
|
+ enum vkd3d_shader_conditional_op op, const struct vsir_src_operand *arg)
|
|
{
|
|
const char *condition;
|
|
struct msl_src src;
|
|
@@ -1028,9 +1028,9 @@ static void msl_sample(struct msl_generator *gen, const struct vkd3d_shader_inst
|
|
{
|
|
bool bias, compare, comparison_sampler, dynamic_offset, gather, grad, lod, lod_zero, offset;
|
|
const struct msl_resource_type_info *resource_type_info;
|
|
- const struct vkd3d_shader_src_param *resource, *sampler;
|
|
unsigned int resource_id, resource_idx, resource_space;
|
|
unsigned int sampler_id, sampler_idx, sampler_space;
|
|
+ const struct vsir_src_operand *resource, *sampler;
|
|
unsigned int srv_binding = 0, sampler_binding = 0;
|
|
const struct vkd3d_shader_descriptor_info1 *d;
|
|
enum vkd3d_shader_resource_type resource_type;
|
|
@@ -2401,8 +2401,8 @@ int msl_compile(struct vsir_program *program, uint64_t config_flags,
|
|
return ret;
|
|
|
|
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
|
|
- VKD3D_ASSERT(program->has_descriptor_info);
|
|
- VKD3D_ASSERT(program->has_no_modifiers);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.has_descriptor_info);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.has_no_modifiers);
|
|
|
|
if ((ret = msl_generator_init(&generator, program, compile_info, message_context)) < 0)
|
|
return ret;
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/preproc.l b/libs/vkd3d/libs/vkd3d-shader/preproc.l
|
|
index 8913e57283a..f9b1d67ac36 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/preproc.l
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/preproc.l
|
|
@@ -20,6 +20,7 @@
|
|
|
|
%{
|
|
|
|
+#include "preproc.h"
|
|
#include "preproc.tab.h"
|
|
|
|
#undef ERROR /* defined in wingdi.h */
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/spirv.c b/libs/vkd3d/libs/vkd3d-shader/spirv.c
|
|
index 0d260d63542..e28726f1de9 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/spirv.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/spirv.c
|
|
@@ -2972,7 +2972,6 @@ struct spirv_compiler
|
|
{
|
|
uint32_t id;
|
|
enum vsir_data_type data_type;
|
|
- uint32_t array_element_mask;
|
|
} *output_info;
|
|
uint32_t private_output_variable[MAX_REG_OUTPUT + 1]; /* 1 entry for oDepth */
|
|
uint32_t private_output_variable_write_mask[MAX_REG_OUTPUT + 1]; /* 1 entry for oDepth */
|
|
@@ -3019,16 +3018,6 @@ static bool is_in_default_phase(const struct spirv_compiler *compiler)
|
|
return compiler->phase == VSIR_OP_INVALID;
|
|
}
|
|
|
|
-static bool is_in_control_point_phase(const struct spirv_compiler *compiler)
|
|
-{
|
|
- return compiler->phase == VSIR_OP_HS_CONTROL_POINT_PHASE;
|
|
-}
|
|
-
|
|
-static bool is_in_fork_or_join_phase(const struct spirv_compiler *compiler)
|
|
-{
|
|
- return compiler->phase == VSIR_OP_HS_FORK_PHASE || compiler->phase == VSIR_OP_HS_JOIN_PHASE;
|
|
-}
|
|
-
|
|
static void spirv_compiler_emit_initial_declarations(struct spirv_compiler *compiler);
|
|
static size_t spirv_compiler_get_current_function_location(struct spirv_compiler *compiler);
|
|
static void spirv_compiler_emit_main_prolog(struct spirv_compiler *compiler);
|
|
@@ -3764,7 +3753,7 @@ static uint32_t spirv_compiler_get_type_id_for_reg(struct spirv_compiler *compil
|
|
}
|
|
|
|
static uint32_t spirv_compiler_get_type_id_for_dst(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst)
|
|
+ const struct vsir_dst_operand *dst)
|
|
{
|
|
return spirv_compiler_get_type_id_for_reg(compiler, &dst->reg, dst->write_mask);
|
|
}
|
|
@@ -4117,7 +4106,7 @@ static uint32_t spirv_compiler_emit_construct_vector(struct spirv_compiler *comp
|
|
}
|
|
|
|
static uint32_t spirv_compiler_emit_load_src(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_src_param *src, uint32_t write_mask);
|
|
+ const struct vsir_src_operand *src, uint32_t write_mask);
|
|
|
|
static uint32_t spirv_compiler_emit_register_addressing(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_register_index *reg_index)
|
|
@@ -4827,19 +4816,19 @@ static void spirv_compiler_emit_execution_mode1(struct spirv_compiler *compiler,
|
|
}
|
|
|
|
static uint32_t spirv_compiler_emit_load_src(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_src_param *src, uint32_t write_mask)
|
|
+ const struct vsir_src_operand *src, uint32_t write_mask)
|
|
{
|
|
return spirv_compiler_emit_load_reg(compiler, &src->reg, src->swizzle, write_mask);
|
|
}
|
|
|
|
static uint32_t spirv_compiler_emit_load_src_with_type(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_src_param *src, uint32_t write_mask, enum vsir_data_type data_type)
|
|
+ const struct vsir_src_operand *src, uint32_t write_mask, enum vsir_data_type data_type)
|
|
{
|
|
- struct vkd3d_shader_src_param src_param = *src;
|
|
+ struct vsir_src_operand src_operand = *src;
|
|
|
|
- src_param.reg.data_type = data_type;
|
|
+ src_operand.reg.data_type = data_type;
|
|
|
|
- return spirv_compiler_emit_load_src(compiler, &src_param, write_mask);
|
|
+ return spirv_compiler_emit_load_src(compiler, &src_operand, write_mask);
|
|
}
|
|
|
|
static void spirv_compiler_emit_store_scalar(struct spirv_compiler *compiler,
|
|
@@ -4981,15 +4970,15 @@ static uint32_t spirv_compiler_emit_sat(struct spirv_compiler *compiler,
|
|
}
|
|
|
|
static void spirv_compiler_emit_store_dst(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst, uint32_t val_id)
|
|
+ const struct vsir_dst_operand *dst, uint32_t val_id)
|
|
{
|
|
spirv_compiler_emit_store_reg(compiler, &dst->reg, dst->write_mask, val_id);
|
|
}
|
|
|
|
static void spirv_compiler_emit_store_dst_swizzled(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst, uint32_t val_id, enum vsir_data_type data_type, uint32_t swizzle)
|
|
+ const struct vsir_dst_operand *dst, uint32_t val_id, enum vsir_data_type data_type, uint32_t swizzle)
|
|
{
|
|
- struct vkd3d_shader_dst_param typed_dst = *dst;
|
|
+ struct vsir_dst_operand typed_dst = *dst;
|
|
|
|
val_id = spirv_compiler_emit_swizzle(compiler, val_id,
|
|
VKD3DSP_WRITEMASK_ALL, data_type, swizzle, dst->write_mask);
|
|
@@ -5000,7 +4989,7 @@ static void spirv_compiler_emit_store_dst_swizzled(struct spirv_compiler *compil
|
|
}
|
|
|
|
static void spirv_compiler_emit_store_dst_components(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst, enum vsir_data_type data_type, uint32_t *component_ids)
|
|
+ const struct vsir_dst_operand *dst, enum vsir_data_type data_type, uint32_t *component_ids)
|
|
{
|
|
unsigned int component_count = vsir_write_mask_component_count(dst->write_mask);
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
@@ -5020,7 +5009,7 @@ static void spirv_compiler_emit_store_dst_components(struct spirv_compiler *comp
|
|
}
|
|
|
|
static void spirv_compiler_emit_store_dst_scalar(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst, uint32_t val_id, enum vsir_data_type data_type, uint32_t swizzle)
|
|
+ const struct vsir_dst_operand *dst, uint32_t val_id, enum vsir_data_type data_type, uint32_t swizzle)
|
|
{
|
|
unsigned int component_count = vsir_write_mask_component_count(dst->write_mask);
|
|
uint32_t component_ids[VKD3D_VEC4_SIZE];
|
|
@@ -5462,7 +5451,8 @@ static const struct vkd3d_shader_phase *spirv_compiler_get_current_shader_phase(
|
|
if (is_in_default_phase(compiler))
|
|
return NULL;
|
|
|
|
- return is_in_control_point_phase(compiler) ? &compiler->control_point_phase : &compiler->patch_constant_phase;
|
|
+ return vsir_opcode_is_control_point_phase(compiler->phase)
|
|
+ ? &compiler->control_point_phase : &compiler->patch_constant_phase;
|
|
}
|
|
|
|
static void spirv_compiler_decorate_xfb_output(struct spirv_compiler *compiler,
|
|
@@ -5561,31 +5551,21 @@ static bool needs_private_io_variable(const struct vkd3d_spirv_builtin *builtin)
|
|
return builtin && builtin->fixup_pfn;
|
|
}
|
|
|
|
-static unsigned int shader_signature_next_location(const struct shader_signature *signature)
|
|
-{
|
|
- unsigned int i, max_row;
|
|
-
|
|
- if (!signature)
|
|
- return 0;
|
|
-
|
|
- for (i = 0, max_row = 0; i < signature->element_count; ++i)
|
|
- max_row = max(max_row, signature->elements[i].register_index + signature->elements[i].register_count);
|
|
- return max_row;
|
|
-}
|
|
-
|
|
static const struct vkd3d_symbol *spirv_compiler_emit_io_register(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_dst_param *dst)
|
|
+ const struct vsir_dst_operand *dst)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
const struct vkd3d_shader_register *reg = &dst->reg;
|
|
const struct vkd3d_spirv_builtin *builtin;
|
|
struct vkd3d_symbol reg_symbol;
|
|
SpvStorageClass storage_class;
|
|
+ unsigned int array_size;
|
|
uint32_t write_mask, id;
|
|
struct rb_entry *entry;
|
|
|
|
- VKD3D_ASSERT(!reg->idx_count || !reg->idx[0].rel_addr);
|
|
- VKD3D_ASSERT(reg->idx_count < 2);
|
|
+ VKD3D_ASSERT(reg->idx_count < 1 || !reg->idx[0].rel_addr);
|
|
+ VKD3D_ASSERT(reg->idx_count < 2 || !reg->idx[1].rel_addr);
|
|
+ VKD3D_ASSERT(reg->idx_count < 3);
|
|
|
|
if (reg->type == VKD3DSPR_RASTOUT && reg->idx[0].offset == VSIR_RASTOUT_POINT_SIZE)
|
|
{
|
|
@@ -5603,7 +5583,8 @@ static const struct vkd3d_symbol *spirv_compiler_emit_io_register(struct spirv_c
|
|
if ((entry = rb_get(&compiler->symbol_table, ®_symbol)))
|
|
return RB_ENTRY_VALUE(entry, struct vkd3d_symbol, entry);
|
|
|
|
- id = spirv_compiler_emit_builtin_variable(compiler, builtin, storage_class, 0);
|
|
+ array_size = (reg->idx_count > 1) ? reg->idx[0].offset : 0;
|
|
+ id = spirv_compiler_emit_builtin_variable(compiler, builtin, storage_class, array_size);
|
|
spirv_compiler_emit_register_execution_mode(compiler, reg->type);
|
|
spirv_compiler_emit_register_debug_name(builder, id, reg);
|
|
|
|
@@ -5650,10 +5631,10 @@ static void spirv_compiler_emit_input(struct spirv_compiler *compiler,
|
|
sysval_reg_type = vsir_register_type_from_sysval_input(signature_element->sysval_semantic);
|
|
if (sysval_reg_type != VKD3DSPR_INPUT)
|
|
{
|
|
- struct vkd3d_shader_dst_param dst;
|
|
const struct vkd3d_symbol *symbol;
|
|
+ struct vsir_dst_operand dst;
|
|
|
|
- vsir_dst_param_init(&dst, sysval_reg_type, VSIR_DATA_F32, 0);
|
|
+ vsir_dst_operand_init(&dst, sysval_reg_type, VSIR_DATA_F32, 0);
|
|
symbol = spirv_compiler_emit_io_register(compiler, &dst);
|
|
|
|
vkd3d_symbol_make_io(®_symbol, reg_type, element_idx);
|
|
@@ -5667,11 +5648,8 @@ static void spirv_compiler_emit_input(struct spirv_compiler *compiler,
|
|
|
|
array_sizes[0] = signature_element->register_count;
|
|
array_sizes[1] = (reg_type == VKD3DSPR_PATCHCONST ? 0 : compiler->input_control_point_count);
|
|
- if (array_sizes[0] == 1 && !vsir_sysval_semantic_is_tess_factor(signature_element->sysval_semantic)
|
|
- && (!vsir_sysval_semantic_is_clip_cull(signature_element->sysval_semantic) || array_sizes[1]))
|
|
- {
|
|
+ if (!vsir_signature_element_is_array(signature_element, &compiler->program->normalisation_flags))
|
|
array_sizes[0] = 0;
|
|
- }
|
|
|
|
write_mask = signature_element->mask;
|
|
|
|
@@ -5708,7 +5686,7 @@ static void spirv_compiler_emit_input(struct spirv_compiler *compiler,
|
|
* duplicate declarations are: a single register split into multiple declarations having
|
|
* different components, which should have been merged, and declarations in one phase
|
|
* being repeated in another (i.e. vcp/vocp), which should have been deleted. */
|
|
- if (reg_type != VKD3DSPR_INPUT || !is_in_fork_or_join_phase(compiler))
|
|
+ if (reg_type != VKD3DSPR_INPUT || !vsir_opcode_is_fork_or_join_phase(compiler->phase))
|
|
FIXME("Duplicate input definition found.\n");
|
|
return;
|
|
}
|
|
@@ -5729,7 +5707,7 @@ static void spirv_compiler_emit_input(struct spirv_compiler *compiler,
|
|
if (reg_type == VKD3DSPR_PATCHCONST)
|
|
{
|
|
vkd3d_spirv_build_op_decorate(builder, input_id, SpvDecorationPatch, NULL, 0);
|
|
- location += shader_signature_next_location(&compiler->program->input_signature);
|
|
+ location += vsir_signature_next_location(&compiler->program->input_signature);
|
|
}
|
|
vkd3d_spirv_build_op_decorate1(builder, input_id, SpvDecorationLocation, location);
|
|
if (component_idx)
|
|
@@ -5803,88 +5781,6 @@ static bool is_dual_source_blending(const struct spirv_compiler *compiler)
|
|
return compiler->shader_type == VKD3D_SHADER_TYPE_PIXEL && info && info->dual_source_blending;
|
|
}
|
|
|
|
-static void calculate_clip_or_cull_distance_mask(const struct signature_element *e, uint32_t *mask)
|
|
-{
|
|
- unsigned int write_mask;
|
|
-
|
|
- if (e->semantic_index >= sizeof(*mask) * CHAR_BIT / VKD3D_VEC4_SIZE)
|
|
- {
|
|
- FIXME("Invalid semantic index %u for clip/cull distance.\n", e->semantic_index);
|
|
- return;
|
|
- }
|
|
-
|
|
- write_mask = e->mask;
|
|
- *mask |= (write_mask & VKD3DSP_WRITEMASK_ALL) << (VKD3D_VEC4_SIZE * e->semantic_index);
|
|
-}
|
|
-
|
|
-/* Emits arrayed SPIR-V built-in variables. */
|
|
-static void spirv_compiler_emit_shader_signature_outputs(struct spirv_compiler *compiler)
|
|
-{
|
|
- const struct shader_signature *output_signature = &compiler->program->output_signature;
|
|
- uint32_t clip_distance_mask = 0, clip_distance_id = 0;
|
|
- uint32_t cull_distance_mask = 0, cull_distance_id = 0;
|
|
- const struct vkd3d_spirv_builtin *builtin;
|
|
- unsigned int i, count;
|
|
-
|
|
- for (i = 0; i < output_signature->element_count; ++i)
|
|
- {
|
|
- const struct signature_element *e = &output_signature->elements[i];
|
|
-
|
|
- switch (e->sysval_semantic)
|
|
- {
|
|
- case VKD3D_SHADER_SV_CLIP_DISTANCE:
|
|
- calculate_clip_or_cull_distance_mask(e, &clip_distance_mask);
|
|
- break;
|
|
-
|
|
- case VKD3D_SHADER_SV_CULL_DISTANCE:
|
|
- calculate_clip_or_cull_distance_mask(e, &cull_distance_mask);
|
|
- break;
|
|
-
|
|
- default:
|
|
- break;
|
|
- }
|
|
- }
|
|
-
|
|
- if (clip_distance_mask)
|
|
- {
|
|
- count = vkd3d_popcount(clip_distance_mask);
|
|
- builtin = get_spirv_builtin_for_sysval(compiler, VKD3D_SHADER_SV_CLIP_DISTANCE);
|
|
- clip_distance_id = spirv_compiler_emit_builtin_variable(compiler,
|
|
- builtin, SpvStorageClassOutput, count);
|
|
- }
|
|
-
|
|
- if (cull_distance_mask)
|
|
- {
|
|
- count = vkd3d_popcount(cull_distance_mask);
|
|
- builtin = get_spirv_builtin_for_sysval(compiler, VKD3D_SHADER_SV_CULL_DISTANCE);
|
|
- cull_distance_id = spirv_compiler_emit_builtin_variable(compiler,
|
|
- builtin, SpvStorageClassOutput, count);
|
|
- }
|
|
-
|
|
- for (i = 0; i < output_signature->element_count; ++i)
|
|
- {
|
|
- const struct signature_element *e = &output_signature->elements[i];
|
|
-
|
|
- switch (e->sysval_semantic)
|
|
- {
|
|
- case VKD3D_SHADER_SV_CLIP_DISTANCE:
|
|
- compiler->output_info[i].id = clip_distance_id;
|
|
- compiler->output_info[i].data_type = VSIR_DATA_F32;
|
|
- compiler->output_info[i].array_element_mask = clip_distance_mask;
|
|
- break;
|
|
-
|
|
- case VKD3D_SHADER_SV_CULL_DISTANCE:
|
|
- compiler->output_info[i].id = cull_distance_id;
|
|
- compiler->output_info[i].data_type = VSIR_DATA_F32;
|
|
- compiler->output_info[i].array_element_mask = cull_distance_mask;
|
|
- break;
|
|
-
|
|
- default:
|
|
- break;
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
static uint32_t spirv_compiler_emit_shader_phase_builtin_variable(struct spirv_compiler *compiler,
|
|
const struct vkd3d_spirv_builtin *builtin, const unsigned int *array_sizes, unsigned int size_count)
|
|
{
|
|
@@ -5902,7 +5798,7 @@ static uint32_t spirv_compiler_emit_shader_phase_builtin_variable(struct spirv_c
|
|
return *variable_id;
|
|
|
|
id = spirv_compiler_emit_builtin_variable_v(compiler, builtin, SpvStorageClassOutput, array_sizes, size_count);
|
|
- if (is_in_fork_or_join_phase(compiler))
|
|
+ if (vsir_opcode_is_fork_or_join_phase(compiler->phase))
|
|
vkd3d_spirv_build_op_decorate(builder, id, SpvDecorationPatch, NULL, 0);
|
|
|
|
if (variable_id)
|
|
@@ -5940,7 +5836,7 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
|
|
sysval = VKD3D_SHADER_SV_NONE;
|
|
array_sizes[0] = signature_element->register_count;
|
|
array_sizes[1] = (reg_type == VKD3DSPR_PATCHCONST ? 0 : compiler->output_control_point_count);
|
|
- if (array_sizes[0] == 1 && !vsir_sysval_semantic_is_tess_factor(signature_element->sysval_semantic))
|
|
+ if (!vsir_signature_element_is_array(signature_element, &compiler->program->normalisation_flags))
|
|
array_sizes[0] = 0;
|
|
|
|
builtin = vkd3d_get_spirv_builtin(compiler, reg_type, sysval);
|
|
@@ -5966,8 +5862,7 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
|
|
use_private_variable = true;
|
|
|
|
if (!is_patch_constant
|
|
- && (get_shader_output_swizzle(compiler, signature_element->register_index) != VKD3D_SHADER_NO_SWIZZLE
|
|
- || (compiler->output_info[element_idx].id && compiler->output_info[element_idx].array_element_mask)))
|
|
+ && get_shader_output_swizzle(compiler, signature_element->register_index) != VKD3D_SHADER_NO_SWIZZLE)
|
|
{
|
|
use_private_variable = true;
|
|
}
|
|
@@ -6005,7 +5900,7 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
|
|
unsigned int location = signature_element->target_location;
|
|
|
|
if (is_patch_constant)
|
|
- location += shader_signature_next_location(&compiler->program->output_signature);
|
|
+ location += vsir_signature_next_location(&compiler->program->output_signature);
|
|
else if (compiler->shader_type == VKD3D_SHADER_TYPE_PIXEL
|
|
&& signature_element->sysval_semantic == VKD3D_SHADER_SV_TARGET)
|
|
location = signature_element->semantic_index;
|
|
@@ -6066,36 +5961,18 @@ static void spirv_compiler_emit_output(struct spirv_compiler *compiler,
|
|
}
|
|
}
|
|
|
|
-static uint32_t spirv_compiler_get_output_array_index(struct spirv_compiler *compiler,
|
|
- const struct signature_element *e)
|
|
-{
|
|
- enum vkd3d_shader_sysval_semantic sysval = e->sysval_semantic;
|
|
- const struct vkd3d_spirv_builtin *builtin;
|
|
-
|
|
- builtin = get_spirv_builtin_for_sysval(compiler, sysval);
|
|
-
|
|
- switch (sysval)
|
|
- {
|
|
- case VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN:
|
|
- case VKD3D_SHADER_SV_TESS_FACTOR_LINEDET:
|
|
- return builtin->member_idx;
|
|
- default:
|
|
- return e->semantic_index;
|
|
- }
|
|
-}
|
|
-
|
|
static void spirv_compiler_emit_store_shader_output(struct spirv_compiler *compiler,
|
|
const struct shader_signature *signature, const struct signature_element *output,
|
|
const struct vkd3d_shader_output_info *output_info,
|
|
uint32_t output_index_id, uint32_t val_id, uint32_t write_mask)
|
|
{
|
|
- uint32_t dst_write_mask, use_mask, uninit_mask, swizzle, mask;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- uint32_t type_id, zero_id, ptr_type_id, chain_id, object_id;
|
|
+ uint32_t dst_write_mask, use_mask, uninit_mask, swizzle;
|
|
const struct signature_element *element;
|
|
- unsigned int i, index, array_idx;
|
|
+ uint32_t type_id, zero_id, ptr_type_id;
|
|
enum vsir_data_type data_type;
|
|
uint32_t output_id;
|
|
+ unsigned int i;
|
|
|
|
dst_write_mask = output->mask;
|
|
use_mask = output->used_mask;
|
|
@@ -6149,31 +6026,8 @@ static void spirv_compiler_emit_store_shader_output(struct spirv_compiler *compi
|
|
output_id = vkd3d_spirv_build_op_access_chain1(builder, ptr_type_id, output_id, output_index_id);
|
|
}
|
|
|
|
- if (!output_info->array_element_mask)
|
|
- {
|
|
- spirv_compiler_emit_store(compiler, output_id, dst_write_mask,
|
|
- data_type, SpvStorageClassOutput, write_mask, val_id);
|
|
- return;
|
|
- }
|
|
-
|
|
- type_id = spirv_get_type_id(compiler, data_type, 1);
|
|
- ptr_type_id = vkd3d_spirv_get_op_type_pointer(builder, SpvStorageClassOutput, type_id);
|
|
- mask = output_info->array_element_mask;
|
|
- array_idx = spirv_compiler_get_output_array_index(compiler, output);
|
|
- mask &= (1u << (array_idx * VKD3D_VEC4_SIZE)) - 1;
|
|
- for (i = 0, index = vkd3d_popcount(mask); i < VKD3D_VEC4_SIZE; ++i)
|
|
- {
|
|
- if (!(write_mask & (VKD3DSP_WRITEMASK_0 << i)))
|
|
- continue;
|
|
-
|
|
- chain_id = vkd3d_spirv_build_op_access_chain1(builder,
|
|
- ptr_type_id, output_id, spirv_compiler_get_constant_uint(compiler, index));
|
|
- object_id = spirv_compiler_emit_swizzle(compiler, val_id, write_mask,
|
|
- data_type, VKD3D_SHADER_NO_SWIZZLE, VKD3DSP_WRITEMASK_0 << i);
|
|
- spirv_compiler_emit_store(compiler, chain_id, VKD3DSP_WRITEMASK_0, data_type,
|
|
- SpvStorageClassOutput, VKD3DSP_WRITEMASK_0 << i, object_id);
|
|
- ++index;
|
|
- }
|
|
+ spirv_compiler_emit_store(compiler, output_id, dst_write_mask,
|
|
+ data_type, SpvStorageClassOutput, write_mask, val_id);
|
|
}
|
|
|
|
static void spirv_compiler_emit_shader_epilogue_function(struct spirv_compiler *compiler)
|
|
@@ -6190,7 +6044,7 @@ static void spirv_compiler_emit_shader_epilogue_function(struct spirv_compiler *
|
|
STATIC_ASSERT(ARRAY_SIZE(compiler->private_output_variable) == ARRAY_SIZE(param_type_id));
|
|
STATIC_ASSERT(ARRAY_SIZE(compiler->private_output_variable) == ARRAY_SIZE(compiler->private_output_variable_write_mask));
|
|
|
|
- is_patch_constant = is_in_fork_or_join_phase(compiler);
|
|
+ is_patch_constant = vsir_opcode_is_fork_or_join_phase(compiler->phase);
|
|
|
|
signature = is_patch_constant ? &compiler->program->patch_constant_signature
|
|
: &compiler->program->output_signature;
|
|
@@ -6224,7 +6078,7 @@ static void spirv_compiler_emit_shader_epilogue_function(struct spirv_compiler *
|
|
param_id[i] = vkd3d_spirv_build_op_load(builder, type_id, param_id[i], SpvMemoryAccessMaskNone);
|
|
}
|
|
|
|
- if (is_in_control_point_phase(compiler))
|
|
+ if (vsir_opcode_is_control_point_phase(compiler->phase))
|
|
output_index_id = spirv_compiler_emit_load_invocation_id(compiler);
|
|
|
|
for (i = 0; i < signature->element_count; ++i)
|
|
@@ -7259,7 +7113,7 @@ static void spirv_compiler_leave_shader_phase(struct spirv_compiler *compiler)
|
|
|
|
vkd3d_spirv_build_op_function_end(builder);
|
|
|
|
- if (is_in_control_point_phase(compiler))
|
|
+ if (vsir_opcode_is_control_point_phase(compiler->phase))
|
|
{
|
|
if (compiler->epilogue_function_id)
|
|
{
|
|
@@ -7296,8 +7150,8 @@ static void spirv_compiler_enter_shader_phase(struct spirv_compiler *compiler,
|
|
compiler->phase = instruction->opcode;
|
|
spirv_compiler_emit_shader_phase_name(compiler, function_id, NULL);
|
|
|
|
- phase = (instruction->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE)
|
|
- ? &compiler->control_point_phase : &compiler->patch_constant_phase;
|
|
+ phase = vsir_opcode_is_control_point_phase(instruction->opcode)
|
|
+ ? &compiler->control_point_phase : &compiler->patch_constant_phase;
|
|
phase->function_id = function_id;
|
|
/* The insertion location must be set after the label is emitted. */
|
|
phase->function_location = 0;
|
|
@@ -7310,8 +7164,8 @@ static void spirv_compiler_initialise_block(struct spirv_compiler *compiler)
|
|
/* Insertion locations must point immediately after the function's initial label. */
|
|
if (compiler->shader_type == VKD3D_SHADER_TYPE_HULL)
|
|
{
|
|
- struct vkd3d_shader_phase *phase = (compiler->phase == VSIR_OP_HS_CONTROL_POINT_PHASE)
|
|
- ? &compiler->control_point_phase : &compiler->patch_constant_phase;
|
|
+ struct vkd3d_shader_phase *phase = vsir_opcode_is_control_point_phase(compiler->phase)
|
|
+ ? &compiler->control_point_phase : &compiler->patch_constant_phase;
|
|
if (!phase->function_location)
|
|
phase->function_location = vkd3d_spirv_stream_current_location(&builder->function_stream);
|
|
}
|
|
@@ -7477,8 +7331,8 @@ static SpvOp spirv_compiler_map_logical_instruction(const struct vkd3d_shader_in
|
|
static void spirv_compiler_emit_bool_cast(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t val_id;
|
|
|
|
VKD3D_ASSERT(src->reg.data_type == VSIR_DATA_BOOL && dst->reg.data_type != VSIR_DATA_BOOL);
|
|
@@ -7516,8 +7370,8 @@ static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compil
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t src_ids[SPIRV_MAX_SRC_COUNT];
|
|
uint32_t type_id, val_id;
|
|
SpvOp op = SpvOpMax;
|
|
@@ -7596,8 +7450,8 @@ static enum vkd3d_result spirv_compiler_emit_alu_instruction(struct spirv_compil
|
|
static void spirv_compiler_emit_saturate(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t val_id;
|
|
|
|
val_id = spirv_compiler_emit_load_src(compiler, src, dst->write_mask);
|
|
@@ -7609,8 +7463,8 @@ static void spirv_compiler_emit_isfinite(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, src_id, isinf_id, isnan_id, val_id;
|
|
|
|
type_id = spirv_compiler_get_type_id_for_dst(compiler, dst);
|
|
@@ -7681,8 +7535,8 @@ static void spirv_compiler_emit_ext_glsl_instruction(struct spirv_compiler *comp
|
|
{
|
|
uint32_t instr_set_id, type_id, val_id, rev_val_id, uint_max_id, condition_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t src_id[SPIRV_MAX_SRC_COUNT];
|
|
unsigned int i, component_count;
|
|
enum GLSLstd450 glsl_inst;
|
|
@@ -7741,8 +7595,8 @@ static void spirv_compiler_emit_mov(struct spirv_compiler *compiler,
|
|
uint32_t val_id, dst_val_id, type_id, dst_id, src_id, write_mask32, swizzle32;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
struct vkd3d_shader_register_info dst_reg_info, src_reg_info;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
unsigned int i, component_count, write_mask;
|
|
uint32_t components[VKD3D_VEC4_SIZE];
|
|
|
|
@@ -7815,9 +7669,9 @@ static void spirv_compiler_emit_movc(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
uint32_t condition_id, src1_id, src2_id, type_id, val_id;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
unsigned int component_count;
|
|
|
|
condition_id = spirv_compiler_emit_load_src(compiler, &src[0], dst->write_mask);
|
|
@@ -7846,9 +7700,9 @@ static void spirv_compiler_emit_swapc(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
uint32_t condition_id, src1_id, src2_id, type_id, val_id;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
unsigned int component_count;
|
|
|
|
VKD3D_ASSERT(dst[0].write_mask == dst[1].write_mask);
|
|
@@ -7873,8 +7727,8 @@ static void spirv_compiler_emit_dot(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id, src_ids[2];
|
|
unsigned int component_count, i;
|
|
enum vsir_data_type data_type;
|
|
@@ -7910,8 +7764,8 @@ static void spirv_compiler_emit_rcp(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, src_id, val_id, div_id;
|
|
unsigned int component_count;
|
|
|
|
@@ -7931,8 +7785,8 @@ static void spirv_compiler_emit_imad(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id, src_ids[3];
|
|
unsigned int i, component_count;
|
|
|
|
@@ -7953,8 +7807,8 @@ static void spirv_compiler_emit_ftoi(struct spirv_compiler *compiler,
|
|
{
|
|
uint32_t src_id, int_min_id, int_max_id, zero_id, float_max_id, condition_id, val_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t src_type_id, dst_type_id, condition_type_id;
|
|
unsigned int component_count;
|
|
uint32_t write_mask;
|
|
@@ -8008,8 +7862,8 @@ static void spirv_compiler_emit_ftou(struct spirv_compiler *compiler,
|
|
{
|
|
uint32_t src_id, zero_id, uint_max_id, float_max_id, condition_id, val_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t src_type_id, dst_type_id, condition_type_id;
|
|
unsigned int component_count;
|
|
uint32_t write_mask;
|
|
@@ -8056,8 +7910,8 @@ static void spirv_compiler_emit_dtof(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id, src_id;
|
|
unsigned int component_count;
|
|
uint32_t write_mask;
|
|
@@ -8080,8 +7934,8 @@ static void spirv_compiler_emit_bitfield_instruction(struct spirv_compiler *comp
|
|
{
|
|
uint32_t src_ids[4], constituents[VKD3D_VEC4_SIZE], type_id, mask_id, size_id, max_count_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
unsigned int i, j, k, src_count, size;
|
|
enum vsir_data_type data_type;
|
|
uint32_t write_mask;
|
|
@@ -8141,8 +7995,8 @@ static void spirv_compiler_emit_f16tof32(struct spirv_compiler *compiler,
|
|
{
|
|
uint32_t instr_set_id, type_id, scalar_type_id, src_id, result_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t components[VKD3D_VEC4_SIZE];
|
|
uint32_t write_mask;
|
|
unsigned int i, j;
|
|
@@ -8173,8 +8027,8 @@ static void spirv_compiler_emit_f32tof16(struct spirv_compiler *compiler,
|
|
{
|
|
uint32_t instr_set_id, type_id, scalar_type_id, src_id, zero_id, constituents[2];
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t components[VKD3D_VEC4_SIZE];
|
|
uint32_t write_mask;
|
|
unsigned int i, j;
|
|
@@ -8207,8 +8061,8 @@ static void spirv_compiler_emit_comparison_instruction(struct spirv_compiler *co
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t src0_id, src1_id, type_id, result_id;
|
|
uint32_t write_mask = dst->write_mask;
|
|
unsigned int component_count;
|
|
@@ -8272,8 +8126,8 @@ static void spirv_compiler_emit_orderedness_instruction(struct spirv_compiler *c
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, src0_id, src1_id, val_id;
|
|
|
|
type_id = spirv_compiler_get_type_id_for_dst(compiler, dst);
|
|
@@ -8292,8 +8146,8 @@ static void spirv_compiler_emit_float_comparison_instruction(struct spirv_compil
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t src0_id, src1_id, type_id, result_id;
|
|
unsigned int component_count;
|
|
SpvOp op;
|
|
@@ -8322,7 +8176,7 @@ static uint32_t spirv_compiler_emit_conditional_branch(struct spirv_compiler *co
|
|
const struct vkd3d_shader_instruction *instruction, uint32_t target_block_id)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
uint32_t condition_id, merge_block_id;
|
|
|
|
condition_id = spirv_compiler_emit_load_src(compiler, src, VKD3DSP_WRITEMASK_0);
|
|
@@ -8362,7 +8216,7 @@ static void spirv_compiler_emit_return(struct spirv_compiler *compiler,
|
|
spirv_compiler_end_invocation_interlock(compiler);
|
|
|
|
if (compiler->shader_type != VKD3D_SHADER_TYPE_GEOMETRY && (is_in_default_phase(compiler)
|
|
- || is_in_control_point_phase(compiler)))
|
|
+ || vsir_opcode_is_control_point_phase(compiler->phase)))
|
|
spirv_compiler_emit_shader_epilogue_invocation(compiler);
|
|
|
|
vkd3d_spirv_build_op_return(builder);
|
|
@@ -8437,7 +8291,7 @@ static void spirv_compiler_emit_discard(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
uint32_t condition_id, void_id;
|
|
|
|
/* discard is not a block terminator in VSIR, and emitting it as such in SPIR-V would cause
|
|
@@ -8468,7 +8322,7 @@ static void spirv_compiler_emit_label(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
unsigned int block_id = src->reg.idx[0].offset;
|
|
uint32_t label_id;
|
|
|
|
@@ -8506,7 +8360,7 @@ static void spirv_compiler_emit_branch(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
uint32_t condition_id;
|
|
|
|
if (vsir_register_is_label(&src[0].reg))
|
|
@@ -8549,7 +8403,7 @@ static void spirv_compiler_emit_switch(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
uint32_t val_id, default_id;
|
|
unsigned int i, word_count;
|
|
uint32_t *cases;
|
|
@@ -8590,8 +8444,8 @@ static void spirv_compiler_emit_deriv_instruction(struct spirv_compiler *compile
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct instruction_info *info;
|
|
uint32_t type_id, src_id, val_id;
|
|
unsigned int i;
|
|
@@ -8824,8 +8678,8 @@ static void spirv_compiler_emit_ld(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, coordinate_id, val_id;
|
|
SpvImageOperandsMask operands_mask = 0;
|
|
unsigned int image_operand_count = 0;
|
|
@@ -8870,9 +8724,9 @@ static void spirv_compiler_emit_lod(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
- const struct vkd3d_shader_src_param *resource, *sampler;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
+ const struct vsir_src_operand *resource, *sampler;
|
|
uint32_t type_id, coordinate_id, val_id;
|
|
struct vkd3d_shader_image image;
|
|
|
|
@@ -8895,10 +8749,10 @@ static void spirv_compiler_emit_sample(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
- const struct vkd3d_shader_src_param *resource, *sampler;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
unsigned int image_operand_count = 0, component_count;
|
|
+ const struct vsir_src_operand *resource, *sampler;
|
|
uint32_t sampled_type_id, coordinate_id, val_id;
|
|
SpvImageOperandsMask operands_mask = 0;
|
|
struct vkd3d_shader_image image;
|
|
@@ -8965,9 +8819,9 @@ static void spirv_compiler_emit_sample_c(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
uint32_t sampled_type_id, coordinate_id, dref_id, val_id;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
SpvImageOperandsMask operands_mask = 0;
|
|
unsigned int image_operand_count = 0;
|
|
struct vkd3d_shader_image image;
|
|
@@ -9009,11 +8863,11 @@ static void spirv_compiler_emit_sample_c(struct spirv_compiler *compiler,
|
|
static void spirv_compiler_emit_gather4(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
- const struct vkd3d_shader_src_param *addr, *offset, *resource, *sampler;
|
|
uint32_t sampled_type_id, coordinate_id, component_id, dref_id, val_id;
|
|
+ const struct vsir_src_operand *addr, *offset, *resource, *sampler;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
unsigned int image_flags = VKD3D_IMAGE_FLAG_SAMPLED;
|
|
unsigned int component_count, component_idx;
|
|
SpvImageOperandsMask operands_mask = 0;
|
|
@@ -9080,11 +8934,11 @@ static void spirv_compiler_emit_gather4(struct spirv_compiler *compiler,
|
|
|
|
static uint32_t spirv_compiler_emit_raw_structured_addressing(
|
|
struct spirv_compiler *compiler, uint32_t type_id, unsigned int stride,
|
|
- const struct vkd3d_shader_src_param *src0, uint32_t src0_mask,
|
|
- const struct vkd3d_shader_src_param *src1, uint32_t src1_mask)
|
|
+ const struct vsir_src_operand *src0, uint32_t src0_mask,
|
|
+ const struct vsir_src_operand *src1, uint32_t src1_mask)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_src_param *offset;
|
|
+ const struct vsir_src_operand *offset;
|
|
uint32_t structure_id = 0, offset_id;
|
|
uint32_t offset_write_mask;
|
|
|
|
@@ -9112,11 +8966,11 @@ static void spirv_compiler_emit_ld_raw_structured_srv_uav(struct spirv_compiler
|
|
{
|
|
uint32_t coordinate_id, type_id, val_id, texel_type_id, ptr_type_id, ptr_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
- const struct vkd3d_shader_src_param *resource;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct vkd3d_symbol *resource_symbol;
|
|
uint32_t base_coordinate_id, component_idx;
|
|
+ const struct vsir_src_operand *resource;
|
|
uint32_t constituents[VKD3D_VEC4_SIZE];
|
|
struct vkd3d_shader_image image;
|
|
bool storage_buffer_uav = false;
|
|
@@ -9199,12 +9053,12 @@ static void spirv_compiler_emit_ld_tgsm(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t coordinate_id, type_id, ptr_type_id, ptr_id;
|
|
- const struct vkd3d_shader_src_param *resource;
|
|
struct vkd3d_shader_register_info reg_info;
|
|
uint32_t base_coordinate_id, component_idx;
|
|
+ const struct vsir_src_operand *resource;
|
|
uint32_t constituents[VKD3D_VEC4_SIZE];
|
|
unsigned int i, j;
|
|
|
|
@@ -9258,11 +9112,11 @@ static void spirv_compiler_emit_store_uav_raw_structured(struct spirv_compiler *
|
|
{
|
|
uint32_t coordinate_id, type_id, val_id, data_id, ptr_type_id, ptr_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct vkd3d_symbol *resource_symbol;
|
|
uint32_t base_coordinate_id, component_idx;
|
|
- const struct vkd3d_shader_src_param *data;
|
|
+ const struct vsir_src_operand *data;
|
|
struct vkd3d_shader_image image;
|
|
unsigned int component_count;
|
|
uint32_t indices[2];
|
|
@@ -9335,11 +9189,11 @@ static void spirv_compiler_emit_store_tgsm(struct spirv_compiler *compiler,
|
|
{
|
|
uint32_t coordinate_id, type_id, val_id, ptr_type_id, ptr_id, data_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t base_coordinate_id, component_idx;
|
|
struct vkd3d_shader_register_info reg_info;
|
|
- struct vkd3d_shader_src_param data;
|
|
+ struct vsir_src_operand data;
|
|
unsigned int component_count;
|
|
|
|
if (!spirv_compiler_get_register_info(compiler, &dst->reg, ®_info))
|
|
@@ -9392,8 +9246,8 @@ static void spirv_compiler_emit_ld_uav_typed(struct spirv_compiler *compiler,
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
uint32_t coordinate_id, type_id, val_id, ptr_type_id, ptr_id;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct vkd3d_symbol *resource_symbol;
|
|
struct vkd3d_shader_image image;
|
|
uint32_t coordinate_mask;
|
|
@@ -9434,8 +9288,8 @@ static void spirv_compiler_emit_store_uav_typed(struct spirv_compiler *compiler,
|
|
{
|
|
uint32_t coordinate_id, texel_id, type_id, val_id, ptr_type_id, ptr_id;
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct vkd3d_symbol *resource_symbol;
|
|
struct vkd3d_shader_image image;
|
|
uint32_t coordinate_mask;
|
|
@@ -9474,9 +9328,9 @@ static void spirv_compiler_emit_uav_counter_instruction(struct spirv_compiler *c
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
unsigned int memory_semantics = SpvMemorySemanticsMaskNone;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t ptr_type_id, type_id, counter_id, result_id;
|
|
uint32_t coordinate_id, sample_id, pointer_id;
|
|
const struct vkd3d_symbol *resource_symbol;
|
|
@@ -9594,13 +9448,13 @@ static void spirv_compiler_emit_atomic_instruction(struct spirv_compiler *compil
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct vkd3d_symbol *resource_symbol = NULL;
|
|
uint32_t ptr_type_id, type_id, val_id, result_id;
|
|
- const struct vkd3d_shader_dst_param *resource;
|
|
uint32_t coordinate_id, sample_id, pointer_id;
|
|
struct vkd3d_shader_register_info reg_info;
|
|
+ const struct vsir_dst_operand *resource;
|
|
struct vkd3d_shader_image image;
|
|
enum vsir_data_type data_type;
|
|
unsigned int structure_stride;
|
|
@@ -9722,8 +9576,8 @@ static void spirv_compiler_emit_bufinfo(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
const struct vkd3d_symbol *resource_symbol;
|
|
uint32_t type_id, val_id, stride_id;
|
|
struct vkd3d_shader_image image;
|
|
@@ -9774,8 +9628,8 @@ static void spirv_compiler_emit_resinfo(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, lod_id, val_id, miplevel_count_id;
|
|
enum vsir_data_type data_type = VSIR_DATA_U32;
|
|
uint32_t constituents[VKD3D_VEC4_SIZE];
|
|
@@ -9832,7 +9686,7 @@ static void spirv_compiler_emit_resinfo(struct spirv_compiler *compiler,
|
|
}
|
|
|
|
static uint32_t spirv_compiler_emit_query_sample_count(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_src_param *src)
|
|
+ const struct vsir_src_operand *src)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
struct vkd3d_shader_image image;
|
|
@@ -9859,8 +9713,8 @@ static void spirv_compiler_emit_sample_info(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
enum vsir_data_type data_type = VSIR_DATA_U32;
|
|
uint32_t constituents[VKD3D_VEC4_SIZE];
|
|
uint32_t type_id, val_id;
|
|
@@ -9940,7 +9794,7 @@ static void spirv_compiler_emit_sample_position(struct spirv_compiler *compiler,
|
|
};
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
uint32_t constituents[ARRAY_SIZE(standard_sample_positions)];
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t array_type_id, length_id, index_id, id;
|
|
uint32_t sample_count_id, sample_index_id;
|
|
uint32_t type_id, bool_id, ptr_type_id;
|
|
@@ -9994,15 +9848,14 @@ static void spirv_compiler_emit_eval_attrib(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
- const struct vkd3d_shader_register *input = &src[0].reg;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t instr_set_id, type_id, val_id, src_ids[2];
|
|
struct vkd3d_shader_register_info register_info;
|
|
unsigned int src_count = 0;
|
|
enum GLSLstd450 op;
|
|
|
|
- if (!spirv_compiler_get_register_info(compiler, input, ®ister_info))
|
|
+ if (!spirv_compiler_get_register_info(compiler, &src[0].reg, ®ister_info))
|
|
return;
|
|
|
|
if (register_info.storage_class != SpvStorageClassInput)
|
|
@@ -10157,9 +10010,9 @@ static void spirv_compiler_emit_quad_read_across(struct spirv_compiler *compiler
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
uint32_t type_id, direction_type_id, direction_id, val_id;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
|
|
type_id = spirv_get_type_id(compiler, dst->reg.data_type, vsir_write_mask_component_count(dst->write_mask));
|
|
direction_type_id = spirv_get_type_id(compiler, VSIR_DATA_U32, 1);
|
|
@@ -10175,8 +10028,8 @@ static void spirv_compiler_emit_quad_read_lane_at(struct spirv_compiler *compile
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id, lane_id;
|
|
|
|
if (!register_is_constant_or_undef(&src[1].reg))
|
|
@@ -10213,8 +10066,8 @@ static void spirv_compiler_emit_wave_bool_op(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id;
|
|
SpvOp op;
|
|
|
|
@@ -10230,7 +10083,7 @@ static void spirv_compiler_emit_wave_bool_op(struct spirv_compiler *compiler,
|
|
}
|
|
|
|
static uint32_t spirv_compiler_emit_group_nonuniform_ballot(struct spirv_compiler *compiler,
|
|
- const struct vkd3d_shader_src_param *src)
|
|
+ const struct vsir_src_operand *src)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
uint32_t type_id, val_id;
|
|
@@ -10245,7 +10098,7 @@ static uint32_t spirv_compiler_emit_group_nonuniform_ballot(struct spirv_compile
|
|
static void spirv_compiler_emit_wave_active_ballot(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t val_id;
|
|
|
|
val_id = spirv_compiler_emit_group_nonuniform_ballot(compiler, instruction->src);
|
|
@@ -10287,8 +10140,8 @@ static void spirv_compiler_emit_wave_alu_op(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id;
|
|
SpvOp op;
|
|
|
|
@@ -10310,7 +10163,7 @@ static void spirv_compiler_emit_wave_bit_count(struct spirv_compiler *compiler,
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
SpvGroupOperation group_op;
|
|
uint32_t type_id, val_id;
|
|
|
|
@@ -10328,7 +10181,7 @@ static void spirv_compiler_emit_wave_is_first_lane(struct spirv_compiler *compil
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t val_id;
|
|
|
|
val_id = vkd3d_spirv_build_op_group_nonuniform_elect(builder);
|
|
@@ -10339,8 +10192,8 @@ static void spirv_compiler_emit_wave_read_lane_at(struct spirv_compiler *compile
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, lane_id, val_id;
|
|
|
|
type_id = spirv_get_type_id(compiler, dst->reg.data_type, vsir_write_mask_component_count(dst->write_mask));
|
|
@@ -10366,8 +10219,8 @@ static void spirv_compiler_emit_wave_read_lane_first(struct spirv_compiler *comp
|
|
const struct vkd3d_shader_instruction *instruction)
|
|
{
|
|
struct vkd3d_spirv_builder *builder = &compiler->spirv_builder;
|
|
- const struct vkd3d_shader_dst_param *dst = instruction->dst;
|
|
- const struct vkd3d_shader_src_param *src = instruction->src;
|
|
+ const struct vsir_src_operand *src = instruction->src;
|
|
+ const struct vsir_dst_operand *dst = instruction->dst;
|
|
uint32_t type_id, val_id;
|
|
|
|
type_id = spirv_get_type_id(compiler, dst->reg.data_type, vsir_write_mask_component_count(dst->write_mask));
|
|
@@ -10773,7 +10626,7 @@ static int spirv_compiler_handle_instruction(struct spirv_compiler *compiler,
|
|
|
|
static void spirv_compiler_emit_io_declarations(struct spirv_compiler *compiler)
|
|
{
|
|
- struct vkd3d_shader_dst_param dst;
|
|
+ struct vsir_dst_operand dst;
|
|
|
|
for (unsigned int i = 0; i < compiler->program->input_signature.element_count; ++i)
|
|
spirv_compiler_emit_input(compiler, VKD3DSPR_INPUT, i);
|
|
@@ -10798,14 +10651,14 @@ static void spirv_compiler_emit_io_declarations(struct spirv_compiler *compiler)
|
|
|
|
if (compiler->program->has_point_size)
|
|
{
|
|
- vsir_dst_param_init(&dst, VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
+ vsir_dst_operand_init(&dst, VKD3DSPR_RASTOUT, VSIR_DATA_F32, 1);
|
|
dst.reg.idx[0].offset = VSIR_RASTOUT_POINT_SIZE;
|
|
spirv_compiler_emit_io_register(compiler, &dst);
|
|
}
|
|
|
|
if (compiler->program->has_point_coord)
|
|
{
|
|
- vsir_dst_param_init(&dst, VKD3DSPR_POINT_COORD, VSIR_DATA_F32, 0);
|
|
+ vsir_dst_operand_init(&dst, VKD3DSPR_POINT_COORD, VSIR_DATA_F32, 0);
|
|
spirv_compiler_emit_io_register(compiler, &dst);
|
|
}
|
|
|
|
@@ -10816,7 +10669,7 @@ static void spirv_compiler_emit_io_declarations(struct spirv_compiler *compiler)
|
|
if (bitmap_is_set(compiler->program->io_dcls, i)
|
|
|| (compiler->program->shader_version.type == VKD3D_SHADER_TYPE_HULL && i == VKD3DSPR_OUTPOINTID))
|
|
{
|
|
- vsir_dst_param_init(&dst, i, VSIR_DATA_F32, 0);
|
|
+ vsir_dst_operand_init(&dst, i, VSIR_DATA_F32, 0);
|
|
spirv_compiler_emit_io_register(compiler, &dst);
|
|
}
|
|
}
|
|
@@ -10967,9 +10820,6 @@ static int spirv_compiler_generate_spirv(struct spirv_compiler *compiler,
|
|
|| (program->shader_version.type == VKD3D_SHADER_TYPE_HULL && !spirv_compiler_is_opengl_target(compiler)))
|
|
spirv_compiler_emit_tessellator_domain(compiler, program->tess_domain);
|
|
|
|
- if (compiler->shader_type != VKD3D_SHADER_TYPE_HULL)
|
|
- spirv_compiler_emit_shader_signature_outputs(compiler);
|
|
-
|
|
it = vsir_program_iterator(&program->instructions);
|
|
for (ins = vsir_program_iterator_head(&it); ins && result >= 0; ins = vsir_program_iterator_next(&it))
|
|
{
|
|
@@ -11071,8 +10921,9 @@ int spirv_compile(struct vsir_program *program, uint64_t config_flags,
|
|
return ret;
|
|
|
|
VKD3D_ASSERT(program->normalisation_level == VSIR_NORMALISED_SM6);
|
|
- VKD3D_ASSERT(program->has_descriptor_info);
|
|
- VKD3D_ASSERT(program->has_no_modifiers);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.normalised_clip_cull_arrays);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.has_descriptor_info);
|
|
+ VKD3D_ASSERT(program->normalisation_flags.has_no_modifiers);
|
|
|
|
if (!(spirv_compiler = spirv_compiler_create(program, compile_info,
|
|
message_context, config_flags)))
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/tpf.c b/libs/vkd3d/libs/vkd3d-shader/tpf.c
|
|
index 4798a75ce90..97b9ea0fdce 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/tpf.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/tpf.c
|
|
@@ -771,10 +771,10 @@ static bool shader_is_sm_5_1(const struct vkd3d_shader_sm4_parser *sm4)
|
|
return version->major >= 5 && version->minor >= 1;
|
|
}
|
|
|
|
-static bool shader_sm4_read_src_param(struct vkd3d_shader_sm4_parser *priv, const uint32_t **ptr,
|
|
- const uint32_t *end, enum vsir_data_type data_type, struct vkd3d_shader_src_param *src_param);
|
|
-static bool shader_sm4_read_dst_param(struct vkd3d_shader_sm4_parser *priv, const uint32_t **ptr,
|
|
- const uint32_t *end, enum vsir_data_type data_type, struct vkd3d_shader_dst_param *dst_param);
|
|
+static bool tpf_read_src_operand(struct vkd3d_shader_sm4_parser *tpf, const uint32_t **ptr,
|
|
+ const uint32_t *end, enum vsir_data_type data_type, struct vsir_src_operand *src);
|
|
+static bool tpf_read_dst_operand(struct vkd3d_shader_sm4_parser *tpf, const uint32_t **ptr,
|
|
+ const uint32_t *end, enum vsir_data_type data_type, struct vsir_dst_operand *dst);
|
|
|
|
static bool shader_sm4_read_register_space(struct vkd3d_shader_sm4_parser *priv,
|
|
const uint32_t **ptr, const uint32_t *end, unsigned int *register_space)
|
|
@@ -795,23 +795,20 @@ static bool shader_sm4_read_register_space(struct vkd3d_shader_sm4_parser *priv,
|
|
}
|
|
|
|
static void shader_sm4_read_conditional_op(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- shader_sm4_read_src_param(priv, &tokens, &tokens[token_count], VSIR_DATA_U32, &ins->src[0]);
|
|
+ tpf_read_src_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_U32, &ins->src[0]);
|
|
ins->flags = (opcode_token & VKD3D_SM4_CONDITIONAL_NZ) ?
|
|
VKD3D_SHADER_CONDITIONAL_OP_NZ : VKD3D_SHADER_CONDITIONAL_OP_Z;
|
|
}
|
|
|
|
static void shader_sm4_read_case_condition(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- shader_sm4_read_src_param(priv, &tokens, &tokens[token_count], VSIR_DATA_U32, &ins->src[0]);
|
|
+ tpf_read_src_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_U32, &ins->src[0]);
|
|
if (ins->src[0].reg.type != VKD3DSPR_IMMCONST)
|
|
- {
|
|
- FIXME("Switch case value is not a 32-bit constant.\n");
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_CASE_VALUE,
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_CASE_VALUE,
|
|
"Switch case value is not a 32-bit immediate constant register.");
|
|
- }
|
|
}
|
|
|
|
static void shader_sm4_read_shader_data(struct vkd3d_shader_instruction *ins, uint32_t opcode, uint32_t opcode_token,
|
|
@@ -869,7 +866,7 @@ static void shader_sm4_set_descriptor_register_range(struct vkd3d_shader_sm4_par
|
|
}
|
|
|
|
static void shader_sm4_read_dcl_resource(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
struct vkd3d_shader_semantic *semantic = &ins->declaration.semantic;
|
|
enum vkd3d_sm4_resource_type resource_type;
|
|
@@ -898,8 +895,8 @@ static void shader_sm4_read_dcl_resource(struct vkd3d_shader_instruction *ins, u
|
|
}
|
|
|
|
reg_data_type = VSIR_DATA_UNUSED;
|
|
- shader_sm4_read_dst_param(priv, &tokens, end, reg_data_type, &semantic->resource.reg);
|
|
- shader_sm4_set_descriptor_register_range(priv, &semantic->resource.reg.reg, &semantic->resource.range);
|
|
+ tpf_read_dst_operand(tpf, &tokens, end, reg_data_type, &semantic->resource.reg);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &semantic->resource.reg.reg, &semantic->resource.range);
|
|
|
|
components = *tokens++;
|
|
for (i = 0; i < VKD3D_VEC4_SIZE; i++)
|
|
@@ -920,23 +917,23 @@ static void shader_sm4_read_dcl_resource(struct vkd3d_shader_instruction *ins, u
|
|
if (opcode != VKD3D_SM4_OP_DCL_RESOURCE)
|
|
ins->flags = (opcode_token & VKD3D_SM5_UAV_FLAGS_MASK) >> VKD3D_SM5_UAV_FLAGS_SHIFT;
|
|
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &semantic->resource.range.space);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &semantic->resource.range.space);
|
|
}
|
|
|
|
static void shader_sm4_read_dcl_constant_buffer(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
const uint32_t *end = &tokens[token_count];
|
|
|
|
- shader_sm4_read_src_param(priv, &tokens, end, VSIR_DATA_F32, &ins->declaration.cb.src);
|
|
- shader_sm4_set_descriptor_register_range(priv, &ins->declaration.cb.src.reg, &ins->declaration.cb.range);
|
|
+ tpf_read_src_operand(tpf, &tokens, end, VSIR_DATA_F32, &ins->declaration.cb.src);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &ins->declaration.cb.src.reg, &ins->declaration.cb.range);
|
|
if (opcode_token & VKD3D_SM4_INDEX_TYPE_MASK)
|
|
ins->flags |= VKD3DSI_INDEXED_DYNAMIC;
|
|
|
|
ins->declaration.cb.size = ins->declaration.cb.src.reg.idx[2].offset;
|
|
ins->declaration.cb.range.space = 0;
|
|
|
|
- if (shader_is_sm_5_1(priv))
|
|
+ if (shader_is_sm_5_1(tpf))
|
|
{
|
|
if (tokens >= end)
|
|
{
|
|
@@ -945,28 +942,23 @@ static void shader_sm4_read_dcl_constant_buffer(struct vkd3d_shader_instruction
|
|
}
|
|
|
|
ins->declaration.cb.size = *tokens++;
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &ins->declaration.cb.range.space);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &ins->declaration.cb.range.space);
|
|
}
|
|
|
|
ins->declaration.cb.size *= VKD3D_VEC4_SIZE * sizeof(float);
|
|
}
|
|
|
|
static void shader_sm4_read_dcl_sampler(struct vkd3d_shader_instruction *ins, uint32_t opcode, uint32_t opcode_token,
|
|
- const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
const uint32_t *end = &tokens[token_count];
|
|
|
|
ins->flags = (opcode_token & VKD3D_SM4_SAMPLER_MODE_MASK) >> VKD3D_SM4_SAMPLER_MODE_SHIFT;
|
|
if (ins->flags & ~VKD3D_SM4_SAMPLER_COMPARISON)
|
|
FIXME("Unhandled sampler mode %#x.\n", ins->flags);
|
|
- shader_sm4_read_src_param(priv, &tokens, end, VSIR_DATA_UNUSED, &ins->declaration.sampler.src);
|
|
- shader_sm4_set_descriptor_register_range(priv, &ins->declaration.sampler.src.reg, &ins->declaration.sampler.range);
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &ins->declaration.sampler.range.space);
|
|
-}
|
|
-
|
|
-static bool sm4_parser_is_in_fork_or_join_phase(const struct vkd3d_shader_sm4_parser *sm4)
|
|
-{
|
|
- return sm4->phase == VSIR_OP_HS_FORK_PHASE || sm4->phase == VSIR_OP_HS_JOIN_PHASE;
|
|
+ tpf_read_src_operand(tpf, &tokens, end, VSIR_DATA_UNUSED, &ins->declaration.sampler.src);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &ins->declaration.sampler.src.reg, &ins->declaration.sampler.range);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &ins->declaration.sampler.range.space);
|
|
}
|
|
|
|
static void shader_sm4_read_dcl_index_range(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
@@ -981,7 +973,7 @@ static void shader_sm4_read_dcl_index_range(struct vkd3d_shader_instruction *ins
|
|
unsigned int *io_masks;
|
|
uint32_t write_mask;
|
|
|
|
- shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VSIR_DATA_OPAQUE, &index_range->dst);
|
|
+ tpf_read_dst_operand(priv, &tokens, &tokens[token_count], VSIR_DATA_OPAQUE, &index_range->dst);
|
|
index_range->register_count = *tokens;
|
|
|
|
register_idx = index_range->dst.reg.idx[index_range->dst.reg.idx_count - 1].offset;
|
|
@@ -997,7 +989,7 @@ static void shader_sm4_read_dcl_index_range(struct vkd3d_shader_instruction *ins
|
|
signature = &program->input_signature;
|
|
break;
|
|
case VKD3DSPR_OUTPUT:
|
|
- if (sm4_parser_is_in_fork_or_join_phase(priv))
|
|
+ if (vsir_opcode_is_fork_or_join_phase(priv->phase))
|
|
{
|
|
io_masks = priv->patch_constant_register_masks;
|
|
ranges = &priv->patch_constant_index_ranges;
|
|
@@ -1141,68 +1133,54 @@ static void shader_sm4_read_declaration_count(struct vkd3d_shader_instruction *i
|
|
}
|
|
|
|
static void shader_sm4_read_declaration_dst(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VSIR_DATA_F32, &ins->declaration.dst);
|
|
+ tpf_read_dst_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_F32, &ins->declaration.dst);
|
|
}
|
|
|
|
static void shader_sm4_read_declaration_register_semantic(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count],
|
|
+ tpf_read_dst_operand(tpf, &tokens, &tokens[token_count],
|
|
VSIR_DATA_F32, &ins->declaration.register_semantic.reg);
|
|
ins->declaration.register_semantic.sysval_semantic = *tokens;
|
|
}
|
|
|
|
static void shader_sm4_read_dcl_input_ps(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst = &ins->declaration.dst;
|
|
+ struct vsir_dst_operand *dst = &ins->declaration.dst;
|
|
+ struct signature_element *e;
|
|
|
|
ins->flags = (opcode_token & VKD3D_SM4_INTERPOLATION_MODE_MASK) >> VKD3D_SM4_INTERPOLATION_MODE_SHIFT;
|
|
- if (shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VSIR_DATA_F32, dst))
|
|
+ if (tpf_read_dst_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_F32, dst))
|
|
{
|
|
- struct signature_element *e = vsir_signature_find_element_for_reg(
|
|
- &priv->program->input_signature, dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
|
|
-
|
|
- if (!e)
|
|
- {
|
|
- WARN("No matching signature element for input register %u with mask %#x.\n",
|
|
- dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DCL,
|
|
+ if (!(e = vsir_signature_find_element_for_reg(&tpf->program->input_signature,
|
|
+ dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask)))
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DCL,
|
|
"No matching signature element for input register %u with mask %#x.",
|
|
dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
|
|
- }
|
|
else
|
|
- {
|
|
e->interpolation_mode = ins->flags;
|
|
- }
|
|
}
|
|
}
|
|
|
|
static void shader_sm4_read_dcl_input_ps_siv(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- struct vkd3d_shader_dst_param *dst = &ins->declaration.register_semantic.reg;
|
|
+ struct vsir_dst_operand *dst = &ins->declaration.register_semantic.reg;
|
|
+ struct signature_element *e;
|
|
|
|
ins->flags = (opcode_token & VKD3D_SM4_INTERPOLATION_MODE_MASK) >> VKD3D_SM4_INTERPOLATION_MODE_SHIFT;
|
|
- if (shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VSIR_DATA_F32, dst))
|
|
+ if (tpf_read_dst_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_F32, dst))
|
|
{
|
|
- struct signature_element *e = vsir_signature_find_element_for_reg(
|
|
- &priv->program->input_signature, dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
|
|
-
|
|
- if (!e)
|
|
- {
|
|
- WARN("No matching signature element for input register %u with mask %#x.\n",
|
|
- dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DCL,
|
|
+ if (!(e = vsir_signature_find_element_for_reg(&tpf->program->input_signature,
|
|
+ dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask)))
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DCL,
|
|
"No matching signature element for input register %u with mask %#x.",
|
|
dst->reg.idx[dst->reg.idx_count - 1].offset, dst->write_mask);
|
|
- }
|
|
else
|
|
- {
|
|
e->interpolation_mode = ins->flags;
|
|
- }
|
|
}
|
|
ins->declaration.register_semantic.sysval_semantic = *tokens;
|
|
}
|
|
@@ -1226,10 +1204,10 @@ static void shader_sm4_read_dcl_global_flags(struct vkd3d_shader_instruction *in
|
|
}
|
|
|
|
static void shader_sm5_read_fcall(struct vkd3d_shader_instruction *ins, uint32_t opcode, uint32_t opcode_token,
|
|
- const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
ins->src[0].reg.u.fp_body_idx = *tokens++;
|
|
- shader_sm4_read_src_param(priv, &tokens, &tokens[token_count], VSIR_DATA_OPAQUE, &ins->src[0]);
|
|
+ tpf_read_src_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_OPAQUE, &ins->src[0]);
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_function_body(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
@@ -1311,36 +1289,36 @@ static void shader_sm5_read_dcl_thread_group(struct vkd3d_shader_instruction *in
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_uav_raw(struct vkd3d_shader_instruction *ins, uint32_t opcode, uint32_t opcode_token,
|
|
- const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
struct vkd3d_shader_raw_resource *resource = &ins->declaration.raw_resource;
|
|
const uint32_t *end = &tokens[token_count];
|
|
|
|
- shader_sm4_read_dst_param(priv, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
- shader_sm4_set_descriptor_register_range(priv, &resource->resource.reg.reg, &resource->resource.range);
|
|
+ tpf_read_dst_operand(tpf, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &resource->resource.reg.reg, &resource->resource.range);
|
|
ins->flags = (opcode_token & VKD3D_SM5_UAV_FLAGS_MASK) >> VKD3D_SM5_UAV_FLAGS_SHIFT;
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &resource->resource.range.space);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &resource->resource.range.space);
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_uav_structured(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
struct vkd3d_shader_structured_resource *resource = &ins->declaration.structured_resource;
|
|
const uint32_t *end = &tokens[token_count];
|
|
|
|
- shader_sm4_read_dst_param(priv, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
- shader_sm4_set_descriptor_register_range(priv, &resource->resource.reg.reg, &resource->resource.range);
|
|
+ tpf_read_dst_operand(tpf, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &resource->resource.reg.reg, &resource->resource.range);
|
|
ins->flags = (opcode_token & VKD3D_SM5_UAV_FLAGS_MASK) >> VKD3D_SM5_UAV_FLAGS_SHIFT;
|
|
resource->byte_stride = *tokens++;
|
|
if (resource->byte_stride % 4)
|
|
FIXME("Byte stride %u is not multiple of 4.\n", resource->byte_stride);
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &resource->resource.range.space);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &resource->resource.range.space);
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_tgsm_raw(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count], VSIR_DATA_F32, &ins->declaration.tgsm_raw.reg);
|
|
+ tpf_read_dst_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_F32, &ins->declaration.tgsm_raw.reg);
|
|
ins->declaration.tgsm_raw.byte_count = *tokens;
|
|
if (ins->declaration.tgsm_raw.byte_count % 4)
|
|
FIXME("Byte count %u is not multiple of 4.\n", ins->declaration.tgsm_raw.byte_count);
|
|
@@ -1348,10 +1326,9 @@ static void shader_sm5_read_dcl_tgsm_raw(struct vkd3d_shader_instruction *ins, u
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_tgsm_structured(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
- shader_sm4_read_dst_param(priv, &tokens, &tokens[token_count],
|
|
- VSIR_DATA_F32, &ins->declaration.tgsm_structured.reg);
|
|
+ tpf_read_dst_operand(tpf, &tokens, &tokens[token_count], VSIR_DATA_F32, &ins->declaration.tgsm_structured.reg);
|
|
ins->declaration.tgsm_structured.byte_stride = *tokens++;
|
|
ins->declaration.tgsm_structured.structure_count = *tokens;
|
|
if (ins->declaration.tgsm_structured.byte_stride % 4)
|
|
@@ -1360,28 +1337,28 @@ static void shader_sm5_read_dcl_tgsm_structured(struct vkd3d_shader_instruction
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_resource_structured(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
struct vkd3d_shader_structured_resource *resource = &ins->declaration.structured_resource;
|
|
const uint32_t *end = &tokens[token_count];
|
|
|
|
- shader_sm4_read_dst_param(priv, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
- shader_sm4_set_descriptor_register_range(priv, &resource->resource.reg.reg, &resource->resource.range);
|
|
+ tpf_read_dst_operand(tpf, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &resource->resource.reg.reg, &resource->resource.range);
|
|
resource->byte_stride = *tokens++;
|
|
if (resource->byte_stride % 4)
|
|
FIXME("Byte stride %u is not multiple of 4.\n", resource->byte_stride);
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &resource->resource.range.space);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &resource->resource.range.space);
|
|
}
|
|
|
|
static void shader_sm5_read_dcl_resource_raw(struct vkd3d_shader_instruction *ins, uint32_t opcode,
|
|
- uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *priv)
|
|
+ uint32_t opcode_token, const uint32_t *tokens, unsigned int token_count, struct vkd3d_shader_sm4_parser *tpf)
|
|
{
|
|
struct vkd3d_shader_raw_resource *resource = &ins->declaration.raw_resource;
|
|
const uint32_t *end = &tokens[token_count];
|
|
|
|
- shader_sm4_read_dst_param(priv, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
- shader_sm4_set_descriptor_register_range(priv, &resource->resource.reg.reg, &resource->resource.range);
|
|
- shader_sm4_read_register_space(priv, &tokens, end, &resource->resource.range.space);
|
|
+ tpf_read_dst_operand(tpf, &tokens, end, VSIR_DATA_UNUSED, &resource->resource.reg);
|
|
+ shader_sm4_set_descriptor_register_range(tpf, &resource->resource.reg.reg, &resource->resource.range);
|
|
+ shader_sm4_read_register_space(tpf, &tokens, end, &resource->resource.range.space);
|
|
}
|
|
|
|
static void shader_sm5_read_sync(struct vkd3d_shader_instruction *ins, uint32_t opcode, uint32_t opcode_token,
|
|
@@ -2006,12 +1983,12 @@ static enum vsir_data_type map_data_type(char t)
|
|
}
|
|
}
|
|
|
|
-static bool shader_sm4_read_reg_idx(struct vkd3d_shader_sm4_parser *priv, const uint32_t **ptr,
|
|
+static bool shader_sm4_read_reg_idx(struct vkd3d_shader_sm4_parser *tpf, const uint32_t **ptr,
|
|
const uint32_t *end, uint32_t addressing, struct vkd3d_shader_register_index *reg_idx)
|
|
{
|
|
if (addressing & VKD3D_SM4_ADDRESSING_RELATIVE)
|
|
{
|
|
- struct vkd3d_shader_src_param *rel_addr = vsir_program_get_src_params(priv->program, 1);
|
|
+ struct vsir_src_operand *rel_addr = vsir_program_get_src_operands(tpf->program, 1);
|
|
|
|
if (!(reg_idx->rel_addr = rel_addr))
|
|
{
|
|
@@ -2023,7 +2000,7 @@ static bool shader_sm4_read_reg_idx(struct vkd3d_shader_sm4_parser *priv, const
|
|
reg_idx->offset = *(*ptr)++;
|
|
else
|
|
reg_idx->offset = 0;
|
|
- shader_sm4_read_src_param(priv, ptr, end, VSIR_DATA_I32, rel_addr);
|
|
+ tpf_read_src_operand(tpf, ptr, end, VSIR_DATA_I32, rel_addr);
|
|
}
|
|
else
|
|
{
|
|
@@ -2285,7 +2262,7 @@ static bool register_is_control_point_input(const struct vkd3d_shader_register *
|
|
const struct vkd3d_shader_sm4_parser *priv)
|
|
{
|
|
return reg->type == VKD3DSPR_INCONTROLPOINT || reg->type == VKD3DSPR_OUTCONTROLPOINT
|
|
- || (reg->type == VKD3DSPR_INPUT && (priv->phase == VSIR_OP_HS_CONTROL_POINT_PHASE
|
|
+ || (reg->type == VKD3DSPR_INPUT && (vsir_opcode_is_control_point_phase(priv->phase)
|
|
|| priv->program->shader_version.type == VKD3D_SHADER_TYPE_GEOMETRY));
|
|
}
|
|
|
|
@@ -2319,8 +2296,8 @@ static bool shader_sm4_validate_input_output_register(struct vkd3d_shader_sm4_pa
|
|
masks = priv->input_register_masks;
|
|
break;
|
|
case VKD3DSPR_OUTPUT:
|
|
- masks = sm4_parser_is_in_fork_or_join_phase(priv) ? priv->patch_constant_register_masks
|
|
- : priv->output_register_masks;
|
|
+ masks = vsir_opcode_is_fork_or_join_phase(priv->phase)
|
|
+ ? priv->patch_constant_register_masks : priv->output_register_masks;
|
|
break;
|
|
case VKD3DSPR_COLOROUT:
|
|
case VKD3DSPR_OUTCONTROLPOINT:
|
|
@@ -2349,8 +2326,8 @@ static bool shader_sm4_validate_input_output_register(struct vkd3d_shader_sm4_pa
|
|
return true;
|
|
}
|
|
|
|
-static bool shader_sm4_read_src_param(struct vkd3d_shader_sm4_parser *priv, const uint32_t **ptr,
|
|
- const uint32_t *end, enum vsir_data_type data_type, struct vkd3d_shader_src_param *src_param)
|
|
+static bool tpf_read_src_operand(struct vkd3d_shader_sm4_parser *tpf, const uint32_t **ptr,
|
|
+ const uint32_t *end, enum vsir_data_type data_type, struct vsir_src_operand *src)
|
|
{
|
|
unsigned int dimension, mask;
|
|
uint32_t token;
|
|
@@ -2362,7 +2339,7 @@ static bool shader_sm4_read_src_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
}
|
|
token = **ptr;
|
|
|
|
- if (!shader_sm4_read_param(priv, ptr, end, data_type, &src_param->reg, &src_param->modifiers))
|
|
+ if (!shader_sm4_read_param(tpf, ptr, end, data_type, &src->reg, &src->modifiers))
|
|
{
|
|
ERR("Failed to read parameter.\n");
|
|
return false;
|
|
@@ -2372,7 +2349,7 @@ static bool shader_sm4_read_src_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
{
|
|
case VKD3D_SM4_DIMENSION_NONE:
|
|
case VKD3D_SM4_DIMENSION_SCALAR:
|
|
- src_param->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
+ src->swizzle = VKD3D_SHADER_SWIZZLE(X, X, X, X);
|
|
break;
|
|
|
|
case VKD3D_SM4_DIMENSION_VEC4:
|
|
@@ -2383,37 +2360,30 @@ static bool shader_sm4_read_src_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
switch (swizzle_type)
|
|
{
|
|
case VKD3D_SM4_SWIZZLE_NONE:
|
|
- src_param->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
+ src->swizzle = VKD3D_SHADER_NO_SWIZZLE;
|
|
|
|
mask = (token & VKD3D_SM4_WRITEMASK_MASK) >> VKD3D_SM4_WRITEMASK_SHIFT;
|
|
/* Mask seems only to be used for vec4 constants and is always zero. */
|
|
- if (!register_is_constant(&src_param->reg))
|
|
- {
|
|
- FIXME("Source mask %#x is not for a constant.\n", mask);
|
|
- vkd3d_shader_parser_warning(&priv->p, VKD3D_SHADER_WARNING_TPF_UNHANDLED_REGISTER_MASK,
|
|
+ if (!register_is_constant(&src->reg))
|
|
+ vkd3d_shader_parser_warning(&tpf->p, VKD3D_SHADER_WARNING_TPF_UNHANDLED_REGISTER_MASK,
|
|
"Unhandled mask %#x for a non-constant source register.", mask);
|
|
- }
|
|
else if (mask)
|
|
- {
|
|
- FIXME("Unhandled mask %#x.\n", mask);
|
|
- vkd3d_shader_parser_warning(&priv->p, VKD3D_SHADER_WARNING_TPF_UNHANDLED_REGISTER_MASK,
|
|
+ vkd3d_shader_parser_warning(&tpf->p, VKD3D_SHADER_WARNING_TPF_UNHANDLED_REGISTER_MASK,
|
|
"Unhandled source register mask %#x.", mask);
|
|
- }
|
|
|
|
break;
|
|
|
|
case VKD3D_SM4_SWIZZLE_SCALAR:
|
|
- src_param->swizzle = (token & VKD3D_SM4_SWIZZLE_MASK) >> VKD3D_SM4_SWIZZLE_SHIFT;
|
|
- src_param->swizzle = (src_param->swizzle & 0x3) * 0x01010101;
|
|
+ src->swizzle = (token & VKD3D_SM4_SWIZZLE_MASK) >> VKD3D_SM4_SWIZZLE_SHIFT;
|
|
+ src->swizzle = (src->swizzle & 0x3) * 0x01010101;
|
|
break;
|
|
|
|
case VKD3D_SM4_SWIZZLE_VEC4:
|
|
- src_param->swizzle = swizzle_from_sm4((token & VKD3D_SM4_SWIZZLE_MASK) >> VKD3D_SM4_SWIZZLE_SHIFT);
|
|
+ src->swizzle = swizzle_from_sm4((token & VKD3D_SM4_SWIZZLE_MASK) >> VKD3D_SM4_SWIZZLE_SHIFT);
|
|
break;
|
|
|
|
default:
|
|
- FIXME("Unhandled swizzle type %#x.\n", swizzle_type);
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_SWIZZLE,
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_SWIZZLE,
|
|
"Source register swizzle type %#x is invalid.", swizzle_type);
|
|
break;
|
|
}
|
|
@@ -2421,24 +2391,23 @@ static bool shader_sm4_read_src_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
}
|
|
|
|
default:
|
|
- FIXME("Unhandled dimension %#x.\n", dimension);
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DIMENSION,
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DIMENSION,
|
|
"Source register dimension %#x is invalid.", dimension);
|
|
break;
|
|
}
|
|
|
|
if (data_type_is_64_bit(data_type))
|
|
- src_param->swizzle = vsir_swizzle_64_from_32(src_param->swizzle);
|
|
+ src->swizzle = vsir_swizzle_64_from_32(src->swizzle);
|
|
|
|
- if (register_is_input_output(&src_param->reg) && !shader_sm4_validate_input_output_register(priv,
|
|
- &src_param->reg, mask_from_swizzle(src_param->swizzle)))
|
|
+ if (register_is_input_output(&src->reg) && !shader_sm4_validate_input_output_register(tpf,
|
|
+ &src->reg, mask_from_swizzle(src->swizzle)))
|
|
return false;
|
|
|
|
return true;
|
|
}
|
|
|
|
-static bool shader_sm4_read_dst_param(struct vkd3d_shader_sm4_parser *priv, const uint32_t **ptr,
|
|
- const uint32_t *end, enum vsir_data_type data_type, struct vkd3d_shader_dst_param *dst_param)
|
|
+static bool tpf_read_dst_operand(struct vkd3d_shader_sm4_parser *tpf, const uint32_t **ptr,
|
|
+ const uint32_t *end, enum vsir_data_type data_type, struct vsir_dst_operand *dst)
|
|
{
|
|
enum vkd3d_sm4_swizzle_type swizzle_type;
|
|
enum vkd3d_shader_src_modifier modifier;
|
|
@@ -2452,7 +2421,7 @@ static bool shader_sm4_read_dst_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
}
|
|
token = **ptr;
|
|
|
|
- if (!shader_sm4_read_param(priv, ptr, end, data_type, &dst_param->reg, &modifier))
|
|
+ if (!shader_sm4_read_param(tpf, ptr, end, data_type, &dst->reg, &modifier))
|
|
{
|
|
ERR("Failed to read parameter.\n");
|
|
return false;
|
|
@@ -2467,11 +2436,11 @@ static bool shader_sm4_read_dst_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
switch ((dimension = (token & VKD3D_SM4_DIMENSION_MASK) >> VKD3D_SM4_DIMENSION_SHIFT))
|
|
{
|
|
case VKD3D_SM4_DIMENSION_NONE:
|
|
- dst_param->write_mask = 0;
|
|
+ dst->write_mask = 0;
|
|
break;
|
|
|
|
case VKD3D_SM4_DIMENSION_SCALAR:
|
|
- dst_param->write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_0;
|
|
break;
|
|
|
|
case VKD3D_SM4_DIMENSION_VEC4:
|
|
@@ -2479,45 +2448,42 @@ static bool shader_sm4_read_dst_param(struct vkd3d_shader_sm4_parser *priv, cons
|
|
switch (swizzle_type)
|
|
{
|
|
case VKD3D_SM4_SWIZZLE_NONE:
|
|
- dst_param->write_mask = (token & VKD3D_SM4_WRITEMASK_MASK) >> VKD3D_SM4_WRITEMASK_SHIFT;
|
|
+ dst->write_mask = (token & VKD3D_SM4_WRITEMASK_MASK) >> VKD3D_SM4_WRITEMASK_SHIFT;
|
|
break;
|
|
|
|
case VKD3D_SM4_SWIZZLE_VEC4:
|
|
swizzle = swizzle_from_sm4((token & VKD3D_SM4_SWIZZLE_MASK) >> VKD3D_SM4_SWIZZLE_SHIFT);
|
|
if (swizzle != VKD3D_SHADER_NO_SWIZZLE)
|
|
{
|
|
- FIXME("Unhandled swizzle %#x.\n", swizzle);
|
|
- vkd3d_shader_parser_warning(&priv->p, VKD3D_SHADER_WARNING_TPF_UNHANDLED_REGISTER_SWIZZLE,
|
|
+ vkd3d_shader_parser_warning(&tpf->p, VKD3D_SHADER_WARNING_TPF_UNHANDLED_REGISTER_SWIZZLE,
|
|
"Unhandled destination register swizzle %#x.", swizzle);
|
|
}
|
|
- dst_param->write_mask = VKD3DSP_WRITEMASK_ALL;
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_ALL;
|
|
break;
|
|
|
|
default:
|
|
- FIXME("Unhandled swizzle type %#x.\n", swizzle_type);
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_SWIZZLE,
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_SWIZZLE,
|
|
"Destination register swizzle type %#x is invalid.", swizzle_type);
|
|
break;
|
|
}
|
|
break;
|
|
|
|
default:
|
|
- FIXME("Unhandled dimension %#x.\n", dimension);
|
|
- vkd3d_shader_parser_error(&priv->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DIMENSION,
|
|
+ vkd3d_shader_parser_error(&tpf->p, VKD3D_SHADER_ERROR_TPF_INVALID_REGISTER_DIMENSION,
|
|
"Destination register dimension %#x is invalid.", dimension);
|
|
break;
|
|
}
|
|
|
|
if (data_type == VSIR_DATA_F64)
|
|
- dst_param->write_mask = vsir_write_mask_64_from_32(dst_param->write_mask);
|
|
+ dst->write_mask = vsir_write_mask_64_from_32(dst->write_mask);
|
|
/* Some scalar registers are declared with no write mask in shader bytecode. */
|
|
- if (!dst_param->write_mask && shader_sm4_is_scalar_register(&dst_param->reg))
|
|
- dst_param->write_mask = VKD3DSP_WRITEMASK_0;
|
|
- dst_param->modifiers = 0;
|
|
- dst_param->shift = 0;
|
|
+ if (!dst->write_mask && shader_sm4_is_scalar_register(&dst->reg))
|
|
+ dst->write_mask = VKD3DSP_WRITEMASK_0;
|
|
+ dst->modifiers = 0;
|
|
+ dst->shift = 0;
|
|
|
|
- if (register_is_input_output(&dst_param->reg) && !shader_sm4_validate_input_output_register(priv,
|
|
- &dst_param->reg, dst_param->write_mask))
|
|
+ if (register_is_input_output(&dst->reg)
|
|
+ && !shader_sm4_validate_input_output_register(tpf, &dst->reg, dst->write_mask))
|
|
return false;
|
|
|
|
return true;
|
|
@@ -2612,9 +2578,9 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
|
|
const struct vkd3d_sm4_opcode_info *opcode_info;
|
|
uint32_t opcode_token, opcode, previous_token;
|
|
struct vsir_program *program = sm4->program;
|
|
- struct vkd3d_shader_dst_param *dst_params;
|
|
- struct vkd3d_shader_src_param *src_params;
|
|
const uint32_t **ptr = &sm4->ptr;
|
|
+ struct vsir_src_operand *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
unsigned int i, len;
|
|
const uint32_t *p;
|
|
uint32_t precise;
|
|
@@ -2661,7 +2627,7 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
|
|
if (ins->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE || ins->opcode == VSIR_OP_HS_FORK_PHASE
|
|
|| ins->opcode == VSIR_OP_HS_JOIN_PHASE)
|
|
sm4->phase = ins->opcode;
|
|
- sm4->has_control_point_phase |= ins->opcode == VSIR_OP_HS_CONTROL_POINT_PHASE;
|
|
+ sm4->has_control_point_phase |= vsir_opcode_is_control_point_phase(ins->opcode);
|
|
ins->flags = 0;
|
|
ins->coissue = false;
|
|
ins->raw = false;
|
|
@@ -2669,10 +2635,9 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
|
|
ins->predicate = NULL;
|
|
ins->dst_count = opcode_info_get_dst_count(opcode_info);
|
|
ins->src_count = opcode_info_get_src_count(opcode_info);
|
|
- ins->src = src_params = vsir_program_get_src_params(program, ins->src_count);
|
|
- if (!src_params && ins->src_count)
|
|
+ ins->src = src = vsir_program_get_src_operands(program, ins->src_count);
|
|
+ if (!src && ins->src_count)
|
|
{
|
|
- ERR("Failed to allocate src parameters.\n");
|
|
vkd3d_shader_parser_error(&sm4->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
|
|
ins->opcode = VSIR_OP_INVALID;
|
|
return;
|
|
@@ -2711,29 +2676,26 @@ static void shader_sm4_read_instruction(struct vkd3d_shader_sm4_parser *sm4, str
|
|
precise = (opcode_token & VKD3D_SM5_PRECISE_MASK) >> VKD3D_SM5_PRECISE_SHIFT;
|
|
ins->flags |= precise << VKD3DSI_PRECISE_SHIFT;
|
|
|
|
- ins->dst = dst_params = vsir_program_get_dst_params(program, ins->dst_count);
|
|
- if (!dst_params && ins->dst_count)
|
|
+ ins->dst = dst = vsir_program_get_dst_operands(program, ins->dst_count);
|
|
+ if (!dst && ins->dst_count)
|
|
{
|
|
- ERR("Failed to allocate dst parameters.\n");
|
|
vkd3d_shader_parser_error(&sm4->p, VKD3D_SHADER_ERROR_TPF_OUT_OF_MEMORY, "Out of memory.");
|
|
ins->opcode = VSIR_OP_INVALID;
|
|
return;
|
|
}
|
|
for (i = 0; i < ins->dst_count; ++i)
|
|
{
|
|
- if (!(shader_sm4_read_dst_param(sm4, &p, *ptr, map_data_type(opcode_info->dst_info[i]),
|
|
- &dst_params[i])))
|
|
+ if (!(tpf_read_dst_operand(sm4, &p, *ptr, map_data_type(opcode_info->dst_info[i]), &dst[i])))
|
|
{
|
|
ins->opcode = VSIR_OP_INVALID;
|
|
return;
|
|
}
|
|
- dst_params[i].modifiers |= instruction_dst_modifier;
|
|
+ dst[i].modifiers |= instruction_dst_modifier;
|
|
}
|
|
|
|
for (i = 0; i < ins->src_count; ++i)
|
|
{
|
|
- if (!(shader_sm4_read_src_param(sm4, &p, *ptr, map_data_type(opcode_info->src_info[i]),
|
|
- &src_params[i])))
|
|
+ if (!(tpf_read_src_operand(sm4, &p, *ptr, map_data_type(opcode_info->src_info[i]), &src[i])))
|
|
{
|
|
ins->opcode = VSIR_OP_INVALID;
|
|
return;
|
|
@@ -2816,6 +2778,9 @@ static bool shader_sm4_init(struct vkd3d_shader_sm4_parser *sm4, struct vsir_pro
|
|
if (!vsir_program_init(program, compile_info,
|
|
&version, token_count / 7u + 20, VSIR_CF_STRUCTURED, VSIR_NORMALISED_SM4))
|
|
return false;
|
|
+
|
|
+ program->f32_denorm_mode = VSIR_DENORM_FLUSH_TO_ZERO;
|
|
+
|
|
vkd3d_shader_parser_init(&sm4->p, message_context, compile_info->source_name);
|
|
sm4->ptr = sm4->start;
|
|
sm4->program = program;
|
|
@@ -3414,10 +3379,10 @@ struct sm4_instruction
|
|
struct sm4_instruction_modifier modifiers[1];
|
|
unsigned int modifier_count;
|
|
|
|
- struct vkd3d_shader_dst_param dsts[2];
|
|
+ struct vsir_dst_operand dsts[2];
|
|
unsigned int dst_count;
|
|
|
|
- struct vkd3d_shader_src_param srcs[5];
|
|
+ struct vsir_src_operand srcs[5];
|
|
unsigned int src_count;
|
|
|
|
unsigned int byte_stride;
|
|
@@ -3425,7 +3390,7 @@ struct sm4_instruction
|
|
uint32_t idx[3];
|
|
unsigned int idx_count;
|
|
|
|
- struct vkd3d_shader_src_param idx_srcs[7];
|
|
+ struct vsir_src_operand idx_srcs[7];
|
|
unsigned int idx_src_count;
|
|
};
|
|
|
|
@@ -3518,7 +3483,7 @@ static void sm4_write_register_index(const struct tpf_compiler *tpf, const struc
|
|
|
|
if (addressing & VKD3D_SM4_ADDRESSING_RELATIVE)
|
|
{
|
|
- const struct vkd3d_shader_src_param *idx_src = idx->rel_addr;
|
|
+ const struct vsir_src_operand *idx_src = idx->rel_addr;
|
|
uint32_t idx_src_token;
|
|
|
|
VKD3D_ASSERT(idx_src);
|
|
@@ -3535,7 +3500,7 @@ static void sm4_write_register_index(const struct tpf_compiler *tpf, const struc
|
|
}
|
|
}
|
|
|
|
-static void sm4_write_dst_register(const struct tpf_compiler *tpf, const struct vkd3d_shader_dst_param *dst)
|
|
+static void sm4_write_dst_register(const struct tpf_compiler *tpf, const struct vsir_dst_operand *dst)
|
|
{
|
|
struct vkd3d_bytecode_buffer *buffer = tpf->buffer;
|
|
uint32_t token = 0;
|
|
@@ -3548,7 +3513,7 @@ static void sm4_write_dst_register(const struct tpf_compiler *tpf, const struct
|
|
sm4_write_register_index(tpf, &dst->reg, j);
|
|
}
|
|
|
|
-static void sm4_write_src_register(const struct tpf_compiler *tpf, const struct vkd3d_shader_src_param *src)
|
|
+static void sm4_write_src_register(const struct tpf_compiler *tpf, const struct vsir_src_operand *src)
|
|
{
|
|
struct vkd3d_bytecode_buffer *buffer = tpf->buffer;
|
|
uint32_t token = 0, mod_token = 0;
|
|
@@ -3758,7 +3723,7 @@ static void tpf_dcl_indexable_temp(const struct tpf_compiler *tpf, const struct
|
|
}
|
|
|
|
static void tpf_dcl_semantic(const struct tpf_compiler *tpf, enum vkd3d_sm4_opcode opcode,
|
|
- const struct vkd3d_shader_dst_param *dst, uint32_t interpolation_flags)
|
|
+ const struct vsir_dst_operand *dst, uint32_t interpolation_flags)
|
|
{
|
|
struct sm4_instruction instr =
|
|
{
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
|
|
index ee113f57736..68285be0a49 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_main.c
|
|
@@ -319,14 +319,21 @@ void vkd3d_string_buffer_release(struct vkd3d_string_buffer_cache *cache, struct
|
|
cache->buffers[cache->count++] = buffer;
|
|
}
|
|
|
|
-void vkd3d_shader_code_from_string_buffer(struct vkd3d_shader_code *code, struct vkd3d_string_buffer *buffer)
|
|
+static char *vkd3d_shader_string_from_string_buffer(struct vkd3d_string_buffer *buffer)
|
|
{
|
|
- code->code = buffer->buffer;
|
|
- code->size = buffer->content_size;
|
|
+ char *s = buffer->buffer;
|
|
|
|
buffer->buffer = NULL;
|
|
buffer->buffer_size = 0;
|
|
buffer->content_size = 0;
|
|
+
|
|
+ return s;
|
|
+}
|
|
+
|
|
+void vkd3d_shader_code_from_string_buffer(struct vkd3d_shader_code *code, struct vkd3d_string_buffer *buffer)
|
|
+{
|
|
+ code->size = buffer->content_size;
|
|
+ code->code = vkd3d_shader_string_from_string_buffer(buffer);
|
|
}
|
|
|
|
void vkd3d_shader_message_context_init(struct vkd3d_shader_message_context *context,
|
|
@@ -347,23 +354,15 @@ void vkd3d_shader_message_context_trace_messages_(const struct vkd3d_shader_mess
|
|
vkd3d_string_buffer_trace_(&context->messages, function);
|
|
}
|
|
|
|
-bool vkd3d_shader_message_context_copy_messages(struct vkd3d_shader_message_context *context, char **out)
|
|
+void vkd3d_shader_string_from_message_context(char **out, struct vkd3d_shader_message_context *context)
|
|
{
|
|
- char *messages;
|
|
-
|
|
if (!out)
|
|
- return true;
|
|
-
|
|
- *out = NULL;
|
|
-
|
|
- if (!context->messages.content_size)
|
|
- return true;
|
|
+ return;
|
|
|
|
- if (!(messages = vkd3d_malloc(context->messages.content_size + 1)))
|
|
- return false;
|
|
- memcpy(messages, context->messages.buffer, context->messages.content_size + 1);
|
|
- *out = messages;
|
|
- return true;
|
|
+ if (context->messages.content_size)
|
|
+ *out = vkd3d_shader_string_from_string_buffer(&context->messages);
|
|
+ else
|
|
+ *out = NULL;
|
|
}
|
|
|
|
void vkd3d_shader_vnote(struct vkd3d_shader_message_context *context, const struct vkd3d_shader_location *location,
|
|
@@ -729,6 +728,7 @@ void vkd3d_shader_parser_init(struct vkd3d_shader_parser *parser,
|
|
parser->location.source_name = source_name;
|
|
parser->location.line = 1;
|
|
parser->location.column = 0;
|
|
+ parser->status = VKD3D_OK;
|
|
}
|
|
|
|
void VKD3D_PRINTF_FUNC(3, 4) vkd3d_shader_parser_error(struct vkd3d_shader_parser *parser,
|
|
@@ -1676,7 +1676,7 @@ static int vsir_program_scan(struct vsir_program *program, const struct vkd3d_sh
|
|
add_descriptor_info = true;
|
|
}
|
|
|
|
- if (program->has_descriptor_info)
|
|
+ if (program->normalisation_flags.has_descriptor_info)
|
|
add_descriptor_info = false;
|
|
|
|
tessellation_info = vkd3d_find_struct(compile_info->next, SCAN_HULL_SHADER_TESSELLATION_INFO);
|
|
@@ -1686,7 +1686,7 @@ static int vsir_program_scan(struct vsir_program *program, const struct vkd3d_sh
|
|
add_descriptor_info ? &program->descriptors : NULL, combined_sampler_info, message_context);
|
|
|
|
if (add_descriptor_info)
|
|
- program->has_descriptor_info = true;
|
|
+ program->normalisation_flags.has_descriptor_info = true;
|
|
|
|
if (TRACE_ON())
|
|
vsir_program_trace(program);
|
|
@@ -1771,8 +1771,7 @@ int vkd3d_shader_scan(const struct vkd3d_shader_compile_info *compile_info, char
|
|
}
|
|
|
|
vkd3d_shader_message_context_trace_messages(&message_context);
|
|
- if (!vkd3d_shader_message_context_copy_messages(&message_context, messages))
|
|
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ vkd3d_shader_string_from_message_context(messages, &message_context);
|
|
vkd3d_shader_message_context_cleanup(&message_context);
|
|
return ret;
|
|
}
|
|
@@ -1919,8 +1918,7 @@ int vkd3d_shader_compile(const struct vkd3d_shader_compile_info *compile_info,
|
|
vkd3d_shader_dump_shader(&dump_data, out->code, out->size, SHADER_DUMP_TYPE_TARGET);
|
|
|
|
vkd3d_shader_message_context_trace_messages(&message_context);
|
|
- if (!vkd3d_shader_message_context_copy_messages(&message_context, messages))
|
|
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ vkd3d_shader_string_from_message_context(messages, &message_context);
|
|
vkd3d_shader_message_context_cleanup(&message_context);
|
|
return ret;
|
|
}
|
|
@@ -2038,9 +2036,7 @@ int vkd3d_shader_parse_input_signature(const struct vkd3d_shader_code *dxbc,
|
|
|
|
ret = shader_parse_input_signature(dxbc, &message_context, &shader_signature);
|
|
vkd3d_shader_message_context_trace_messages(&message_context);
|
|
- if (!vkd3d_shader_message_context_copy_messages(&message_context, messages))
|
|
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
-
|
|
+ vkd3d_shader_string_from_message_context(messages, &message_context);
|
|
vkd3d_shader_message_context_cleanup(&message_context);
|
|
|
|
if (!vkd3d_shader_signature_from_shader_signature(signature, &shader_signature))
|
|
@@ -2245,8 +2241,7 @@ int vkd3d_shader_preprocess(const struct vkd3d_shader_compile_info *compile_info
|
|
vkd3d_shader_dump_shader(&dump_data, out->code, out->size, SHADER_DUMP_TYPE_PREPROC);
|
|
|
|
vkd3d_shader_message_context_trace_messages(&message_context);
|
|
- if (!vkd3d_shader_message_context_copy_messages(&message_context, messages))
|
|
- ret = VKD3D_ERROR_OUT_OF_MEMORY;
|
|
+ vkd3d_shader_string_from_message_context(messages, &message_context);
|
|
vkd3d_shader_message_context_cleanup(&message_context);
|
|
return ret;
|
|
}
|
|
diff --git a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
|
|
index 97fe5238046..794600302f9 100644
|
|
--- a/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
|
|
+++ b/libs/vkd3d/libs/vkd3d-shader/vkd3d_shader_private.h
|
|
@@ -229,6 +229,10 @@ enum vkd3d_shader_error
|
|
VKD3D_SHADER_ERROR_DXIL_INVALID_RESOURCE_HANDLE = 8019,
|
|
VKD3D_SHADER_ERROR_DXIL_INVALID_CONSTANT = 8020,
|
|
VKD3D_SHADER_ERROR_DXIL_NOT_IMPLEMENTED = 8021,
|
|
+ VKD3D_SHADER_ERROR_DXIL_DUPLICATED_BLOCK = 8022,
|
|
+ VKD3D_SHADER_ERROR_DXIL_INVALID_STRING = 8023,
|
|
+ VKD3D_SHADER_ERROR_DXIL_INVALID_ATTRIBUTE_KIND = 8024,
|
|
+ VKD3D_SHADER_ERROR_DXIL_INVALID_ATTRIBUTE = 8025,
|
|
|
|
VKD3D_SHADER_WARNING_DXIL_UNKNOWN_MAGIC_NUMBER = 8300,
|
|
VKD3D_SHADER_WARNING_DXIL_UNKNOWN_SHADER_TYPE = 8301,
|
|
@@ -241,6 +245,7 @@ enum vkd3d_shader_error
|
|
VKD3D_SHADER_WARNING_DXIL_INVALID_OPERATION = 8308,
|
|
VKD3D_SHADER_WARNING_DXIL_IGNORING_ATTACHMENT = 8309,
|
|
VKD3D_SHADER_WARNING_DXIL_UNDEFINED_OPERAND = 8310,
|
|
+ VKD3D_SHADER_WARNING_DXIL_IGNORING_RECORD = 8311,
|
|
|
|
VKD3D_SHADER_ERROR_VSIR_NOT_IMPLEMENTED = 9000,
|
|
VKD3D_SHADER_ERROR_VSIR_INVALID_OPCODE = 9001,
|
|
@@ -628,6 +633,16 @@ enum vkd3d_shader_opcode
|
|
|
|
const char *vsir_opcode_get_name(enum vkd3d_shader_opcode op, const char *error);
|
|
|
|
+static inline bool vsir_opcode_is_fork_or_join_phase(enum vkd3d_shader_opcode op)
|
|
+{
|
|
+ return op == VSIR_OP_HS_FORK_PHASE || op == VSIR_OP_HS_JOIN_PHASE;
|
|
+}
|
|
+
|
|
+static inline bool vsir_opcode_is_control_point_phase(enum vkd3d_shader_opcode op)
|
|
+{
|
|
+ return op == VSIR_OP_HS_CONTROL_POINT_PHASE;
|
|
+}
|
|
+
|
|
enum vkd3d_shader_register_type
|
|
{
|
|
VKD3DSPR_TEMP,
|
|
@@ -969,6 +984,13 @@ struct vkd3d_shader_version
|
|
uint8_t minor;
|
|
};
|
|
|
|
+struct vsir_normalisation_flags
|
|
+{
|
|
+ bool has_descriptor_info;
|
|
+ bool has_no_modifiers;
|
|
+ bool normalised_clip_cull_arrays;
|
|
+};
|
|
+
|
|
struct vkd3d_shader_immediate_constant_buffer
|
|
{
|
|
unsigned int register_idx;
|
|
@@ -993,7 +1015,7 @@ struct vkd3d_shader_indexable_temp
|
|
|
|
struct vkd3d_shader_register_index
|
|
{
|
|
- struct vkd3d_shader_src_param *rel_addr;
|
|
+ struct vsir_src_operand *rel_addr;
|
|
unsigned int offset;
|
|
/* address is known to fall within the object (for optimisation) */
|
|
bool is_in_bounds;
|
|
@@ -1052,7 +1074,7 @@ static inline enum vkd3d_shader_register_type vsir_register_type_from_sysval_inp
|
|
}
|
|
}
|
|
|
|
-struct vkd3d_shader_dst_param
|
|
+struct vsir_dst_operand
|
|
{
|
|
struct vkd3d_shader_register reg;
|
|
uint32_t write_mask;
|
|
@@ -1060,23 +1082,24 @@ struct vkd3d_shader_dst_param
|
|
unsigned int shift;
|
|
};
|
|
|
|
-struct vkd3d_shader_src_param
|
|
+void vsir_dst_operand_init(struct vsir_dst_operand *dst, enum vkd3d_shader_register_type reg_type,
|
|
+ enum vsir_data_type data_type, unsigned int idx_count);
|
|
+void vsir_dst_operand_init_null(struct vsir_dst_operand *dst);
|
|
+
|
|
+struct vsir_src_operand
|
|
{
|
|
struct vkd3d_shader_register reg;
|
|
uint32_t swizzle;
|
|
enum vkd3d_shader_src_modifier modifiers;
|
|
};
|
|
|
|
-void vsir_src_param_init(struct vkd3d_shader_src_param *param, enum vkd3d_shader_register_type reg_type,
|
|
- enum vsir_data_type data_type, unsigned int idx_count);
|
|
-void vsir_dst_param_init(struct vkd3d_shader_dst_param *param, enum vkd3d_shader_register_type reg_type,
|
|
+void vsir_src_operand_init(struct vsir_src_operand *src, enum vkd3d_shader_register_type reg_type,
|
|
enum vsir_data_type data_type, unsigned int idx_count);
|
|
-void vsir_dst_param_init_null(struct vkd3d_shader_dst_param *dst);
|
|
-void vsir_src_param_init_label(struct vkd3d_shader_src_param *param, unsigned int label_id);
|
|
+void vsir_src_operand_init_label(struct vsir_src_operand *src, unsigned int label_id);
|
|
|
|
struct vkd3d_shader_index_range
|
|
{
|
|
- struct vkd3d_shader_dst_param dst;
|
|
+ struct vsir_dst_operand dst;
|
|
unsigned int register_count;
|
|
};
|
|
|
|
@@ -1088,7 +1111,7 @@ struct vkd3d_shader_register_range
|
|
|
|
struct vkd3d_shader_resource
|
|
{
|
|
- struct vkd3d_shader_dst_param reg;
|
|
+ struct vsir_dst_operand reg;
|
|
struct vkd3d_shader_register_range range;
|
|
};
|
|
|
|
@@ -1152,6 +1175,17 @@ enum vkd3d_shader_input_sysval_semantic
|
|
|
|
#define SIGNATURE_TARGET_LOCATION_UNUSED (~0u)
|
|
|
|
+static inline bool vsir_sysval_semantic_is_tess_factor(enum vkd3d_shader_sysval_semantic sysval_semantic)
|
|
+{
|
|
+ return sysval_semantic >= VKD3D_SHADER_SV_TESS_FACTOR_QUADEDGE
|
|
+ && sysval_semantic <= VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN;
|
|
+}
|
|
+
|
|
+static inline bool vsir_sysval_semantic_is_clip_cull(enum vkd3d_shader_sysval_semantic sysval_semantic)
|
|
+{
|
|
+ return sysval_semantic == VKD3D_SHADER_SV_CLIP_DISTANCE || sysval_semantic == VKD3D_SHADER_SV_CULL_DISTANCE;
|
|
+}
|
|
+
|
|
struct signature_element
|
|
{
|
|
/* sort_index is not a property of the signature element, it is just a
|
|
@@ -1175,6 +1209,20 @@ struct signature_element
|
|
unsigned int target_location;
|
|
};
|
|
|
|
+static inline bool vsir_signature_element_is_array(const struct signature_element *element,
|
|
+ const struct vsir_normalisation_flags *flags)
|
|
+{
|
|
+ enum vkd3d_shader_sysval_semantic semantic = element->sysval_semantic;
|
|
+
|
|
+ if (element->register_count > 1)
|
|
+ return true;
|
|
+ if (vsir_sysval_semantic_is_tess_factor(semantic))
|
|
+ return true;
|
|
+ if (flags->normalised_clip_cull_arrays && vsir_sysval_semantic_is_clip_cull(semantic))
|
|
+ return true;
|
|
+ return false;
|
|
+}
|
|
+
|
|
struct shader_signature
|
|
{
|
|
struct signature_element *elements;
|
|
@@ -1182,21 +1230,11 @@ struct shader_signature
|
|
unsigned int element_count;
|
|
};
|
|
|
|
-static inline bool vsir_sysval_semantic_is_tess_factor(enum vkd3d_shader_sysval_semantic sysval_semantic)
|
|
-{
|
|
- return sysval_semantic >= VKD3D_SHADER_SV_TESS_FACTOR_QUADEDGE
|
|
- && sysval_semantic <= VKD3D_SHADER_SV_TESS_FACTOR_LINEDEN;
|
|
-}
|
|
-
|
|
-static inline bool vsir_sysval_semantic_is_clip_cull(enum vkd3d_shader_sysval_semantic sysval_semantic)
|
|
-{
|
|
- return sysval_semantic == VKD3D_SHADER_SV_CLIP_DISTANCE || sysval_semantic == VKD3D_SHADER_SV_CULL_DISTANCE;
|
|
-}
|
|
-
|
|
struct signature_element *vsir_signature_find_element_for_reg(const struct shader_signature *signature,
|
|
unsigned int reg_idx, unsigned int write_mask);
|
|
bool vsir_signature_find_sysval(const struct shader_signature *signature,
|
|
enum vkd3d_shader_sysval_semantic sysval, unsigned int semantic_index, unsigned int *element_index);
|
|
+unsigned int vsir_signature_next_location(const struct shader_signature *signature);
|
|
void shader_signature_cleanup(struct shader_signature *signature);
|
|
|
|
struct vsir_features
|
|
@@ -1218,19 +1256,19 @@ struct dxbc_shader_desc
|
|
|
|
struct vkd3d_shader_register_semantic
|
|
{
|
|
- struct vkd3d_shader_dst_param reg;
|
|
+ struct vsir_dst_operand reg;
|
|
enum vkd3d_shader_input_sysval_semantic sysval_semantic;
|
|
};
|
|
|
|
struct vkd3d_shader_sampler
|
|
{
|
|
- struct vkd3d_shader_src_param src;
|
|
+ struct vsir_src_operand src;
|
|
struct vkd3d_shader_register_range range;
|
|
};
|
|
|
|
struct vkd3d_shader_constant_buffer
|
|
{
|
|
- struct vkd3d_shader_src_param src;
|
|
+ struct vsir_src_operand src;
|
|
unsigned int size;
|
|
struct vkd3d_shader_register_range range;
|
|
};
|
|
@@ -1254,7 +1292,7 @@ struct vkd3d_shader_tgsm
|
|
|
|
struct vkd3d_shader_tgsm_raw
|
|
{
|
|
- struct vkd3d_shader_dst_param reg;
|
|
+ struct vsir_dst_operand reg;
|
|
unsigned int alignment;
|
|
unsigned int byte_count;
|
|
bool zero_init;
|
|
@@ -1262,7 +1300,7 @@ struct vkd3d_shader_tgsm_raw
|
|
|
|
struct vkd3d_shader_tgsm_structured
|
|
{
|
|
- struct vkd3d_shader_dst_param reg;
|
|
+ struct vsir_dst_operand reg;
|
|
unsigned int alignment;
|
|
unsigned int byte_stride;
|
|
unsigned int structure_count;
|
|
@@ -1324,21 +1362,21 @@ struct vkd3d_shader_instruction
|
|
uint32_t flags;
|
|
size_t dst_count;
|
|
size_t src_count;
|
|
- struct vkd3d_shader_dst_param *dst;
|
|
- struct vkd3d_shader_src_param *src;
|
|
+ struct vsir_dst_operand *dst;
|
|
+ struct vsir_src_operand *src;
|
|
struct vkd3d_shader_texel_offset texel_offset;
|
|
enum vkd3d_shader_resource_type resource_type;
|
|
unsigned int resource_stride;
|
|
enum vsir_data_type resource_data_type[VKD3D_VEC4_SIZE];
|
|
bool coissue, structured, raw;
|
|
- const struct vkd3d_shader_src_param *predicate;
|
|
+ const struct vsir_src_operand *predicate;
|
|
union
|
|
{
|
|
enum vsir_global_flags global_flags;
|
|
struct vkd3d_shader_semantic semantic;
|
|
struct vkd3d_shader_register_semantic register_semantic;
|
|
struct vkd3d_shader_primitive_type primitive_type;
|
|
- struct vkd3d_shader_dst_param dst;
|
|
+ struct vsir_dst_operand dst;
|
|
struct vkd3d_shader_constant_buffer cb;
|
|
struct vkd3d_shader_sampler sampler;
|
|
unsigned int count;
|
|
@@ -1536,6 +1574,24 @@ static inline struct vkd3d_shader_instruction *vsir_program_iterator_insert_befo
|
|
return vsir_program_iterator_current(it);
|
|
}
|
|
|
|
+static inline void vsir_program_iterator_nop_range(const struct vsir_program_iterator *first,
|
|
+ const struct vsir_program_iterator *last, const struct vkd3d_shader_location *location)
|
|
+{
|
|
+ const struct vkd3d_shader_instruction_array *array = first->array;
|
|
+ size_t first_idx = first->idx;
|
|
+ size_t last_idx = last->idx;
|
|
+ size_t idx;
|
|
+
|
|
+ VKD3D_ASSERT(last->array == array);
|
|
+ VKD3D_ASSERT(last_idx < array->count);
|
|
+ VKD3D_ASSERT(first_idx <= last_idx);
|
|
+
|
|
+ for (idx = first_idx; idx <= last_idx; ++idx)
|
|
+ {
|
|
+ vsir_instruction_init(&array->elements[idx], location, VSIR_OP_NOP);
|
|
+ }
|
|
+}
|
|
+
|
|
enum vkd3d_shader_config_flags
|
|
{
|
|
VKD3D_SHADER_CONFIG_FLAG_FORCE_VALIDATION = 0x00000001,
|
|
@@ -1555,6 +1611,13 @@ enum vsir_normalisation_level
|
|
VSIR_NORMALISED_SM6,
|
|
};
|
|
|
|
+enum vsir_denorm_mode
|
|
+{
|
|
+ VSIR_DENORM_ANY = 0,
|
|
+ VSIR_DENORM_PRESERVE,
|
|
+ VSIR_DENORM_FLUSH_TO_ZERO,
|
|
+};
|
|
+
|
|
struct vkd3d_shader_descriptor_info1
|
|
{
|
|
enum vkd3d_shader_descriptor_type type;
|
|
@@ -1592,7 +1655,6 @@ struct vsir_program
|
|
struct shader_signature patch_constant_signature;
|
|
|
|
struct vkd3d_shader_scan_descriptor_info1 descriptors;
|
|
- bool has_descriptor_info;
|
|
size_t descriptors_size;
|
|
|
|
unsigned int parameter_count;
|
|
@@ -1611,14 +1673,15 @@ struct vsir_program
|
|
bool has_fog;
|
|
uint8_t diffuse_written_mask;
|
|
enum vsir_control_flow_type cf_type;
|
|
- enum vsir_normalisation_level normalisation_level;
|
|
- bool has_no_modifiers;
|
|
enum vkd3d_tessellator_domain tess_domain;
|
|
enum vkd3d_shader_tessellator_partitioning tess_partitioning;
|
|
enum vkd3d_shader_tessellator_output_primitive tess_output_primitive;
|
|
enum vkd3d_primitive_type input_primitive, output_topology;
|
|
unsigned int vertices_out_count;
|
|
|
|
+ enum vsir_normalisation_level normalisation_level;
|
|
+ struct vsir_normalisation_flags normalisation_flags;
|
|
+
|
|
uint32_t io_dcls[VKD3D_BITMAP_SIZE(VKD3DSPR_COUNT)];
|
|
|
|
struct vsir_features features;
|
|
@@ -1631,8 +1694,10 @@ struct vsir_program
|
|
size_t icb_capacity;
|
|
size_t icb_count;
|
|
|
|
- struct vkd3d_shader_param_allocator src_params;
|
|
- struct vkd3d_shader_param_allocator dst_params;
|
|
+ struct vkd3d_shader_param_allocator src_operands;
|
|
+ struct vkd3d_shader_param_allocator dst_operands;
|
|
+
|
|
+ enum vsir_denorm_mode f32_denorm_mode;
|
|
};
|
|
|
|
enum vkd3d_result vsir_allocate_temp_registers(struct vsir_program *program,
|
|
@@ -1661,8 +1726,7 @@ enum vkd3d_result vsir_program_transform_early(struct vsir_program *program, uin
|
|
const struct vkd3d_shader_compile_info *compile_info, struct vkd3d_shader_message_context *message_context);
|
|
enum vkd3d_result vsir_program_validate(struct vsir_program *program, uint64_t config_flags,
|
|
const char *source_name, struct vkd3d_shader_message_context *message_context);
|
|
-struct vkd3d_shader_src_param *vsir_program_create_outpointid_param(
|
|
- struct vsir_program *program);
|
|
+struct vsir_src_operand *vsir_program_create_outpointid_param(struct vsir_program *program);
|
|
bool vsir_instruction_init_with_params(struct vsir_program *program,
|
|
struct vkd3d_shader_instruction *ins, const struct vkd3d_shader_location *location,
|
|
enum vkd3d_shader_opcode opcode, unsigned int dst_count, unsigned int src_count);
|
|
@@ -1672,22 +1736,22 @@ static inline struct vkd3d_shader_instruction *vsir_program_append(struct vsir_p
|
|
return shader_instruction_array_append(&program->instructions);
|
|
}
|
|
|
|
-static inline struct vkd3d_shader_dst_param *vsir_program_get_dst_params(
|
|
+static inline struct vsir_dst_operand *vsir_program_get_dst_operands(
|
|
struct vsir_program *program, unsigned int count)
|
|
{
|
|
- struct vkd3d_shader_param_allocator *allocator = &program->dst_params;
|
|
+ struct vkd3d_shader_param_allocator *allocator = &program->dst_operands;
|
|
|
|
- VKD3D_ASSERT(allocator->stride == sizeof(struct vkd3d_shader_dst_param));
|
|
+ VKD3D_ASSERT(allocator->stride == sizeof(struct vsir_dst_operand));
|
|
|
|
return shader_param_allocator_get(allocator, count);
|
|
}
|
|
|
|
-static inline struct vkd3d_shader_src_param *vsir_program_get_src_params(
|
|
+static inline struct vsir_src_operand *vsir_program_get_src_operands(
|
|
struct vsir_program *program, unsigned int count)
|
|
{
|
|
- struct vkd3d_shader_param_allocator *allocator = &program->src_params;
|
|
+ struct vkd3d_shader_param_allocator *allocator = &program->src_operands;
|
|
|
|
- VKD3D_ASSERT(allocator->stride == sizeof(struct vkd3d_shader_src_param));
|
|
+ VKD3D_ASSERT(allocator->stride == sizeof(struct vsir_src_operand));
|
|
|
|
return shader_param_allocator_get(allocator, count);
|
|
}
|
|
@@ -1797,7 +1861,6 @@ struct vkd3d_shader_message_context
|
|
};
|
|
|
|
void vkd3d_shader_message_context_cleanup(struct vkd3d_shader_message_context *context);
|
|
-bool vkd3d_shader_message_context_copy_messages(struct vkd3d_shader_message_context *context, char **out);
|
|
void vkd3d_shader_message_context_init(struct vkd3d_shader_message_context *context,
|
|
enum vkd3d_shader_log_level log_level);
|
|
void vkd3d_shader_message_context_trace_messages_(const struct vkd3d_shader_message_context *context,
|
|
@@ -1815,6 +1878,8 @@ void vkd3d_shader_warning(struct vkd3d_shader_message_context *context, const st
|
|
void vkd3d_shader_vwarning(struct vkd3d_shader_message_context *context, const struct vkd3d_shader_location *location,
|
|
enum vkd3d_shader_error error, const char *format, va_list args);
|
|
|
|
+void vkd3d_shader_string_from_message_context(char **out, struct vkd3d_shader_message_context *context);
|
|
+
|
|
uint64_t vkd3d_shader_init_config_flags(void);
|
|
void vkd3d_shader_trace_text_(const char *text, size_t size, const char *function);
|
|
#define vkd3d_shader_trace_text(text, size) \
|
|
diff --git a/libs/vkd3d/libs/vkd3d/command.c b/libs/vkd3d/libs/vkd3d/command.c
|
|
index 69f42280e8a..7d9a86e909f 100644
|
|
--- a/libs/vkd3d/libs/vkd3d/command.c
|
|
+++ b/libs/vkd3d/libs/vkd3d/command.c
|
|
@@ -31,6 +31,43 @@ static void d3d12_command_queue_submit_locked(struct d3d12_command_queue *queue)
|
|
static HRESULT d3d12_command_queue_flush_ops(struct d3d12_command_queue *queue, bool *flushed_any);
|
|
static HRESULT d3d12_command_queue_flush_ops_locked(struct d3d12_command_queue *queue, bool *flushed_any);
|
|
|
|
+static void vkd3d_resource_list_cleanup(struct vkd3d_resource_list *list)
|
|
+{
|
|
+ vkd3d_free(list->resources);
|
|
+}
|
|
+
|
|
+static void vkd3d_resource_list_init(struct vkd3d_resource_list *list)
|
|
+{
|
|
+ list->resources = NULL;
|
|
+ list->count = 0;
|
|
+ list->capacity = 0;
|
|
+}
|
|
+
|
|
+static bool vkd3d_resource_list_contains(const struct vkd3d_resource_list *list, struct d3d12_resource *resource)
|
|
+{
|
|
+ size_t i;
|
|
+
|
|
+ for (i = 0; i < list->count; i++)
|
|
+ {
|
|
+ if (list->resources[i] == resource)
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static void vkd3d_resource_list_append(struct vkd3d_resource_list *list, struct d3d12_resource *resource)
|
|
+{
|
|
+ if (!vkd3d_array_reserve((void **)&list->resources, &list->capacity, list->count + 1, sizeof(*list->resources)))
|
|
+ ERR("Failed to grow resource list.\n");
|
|
+ list->resources[list->count++] = resource;
|
|
+}
|
|
+
|
|
+static void vkd3d_resource_list_clear(struct vkd3d_resource_list *list)
|
|
+{
|
|
+ list->count = 0;
|
|
+}
|
|
+
|
|
static void vkd3d_null_event_signal(struct vkd3d_null_event *e)
|
|
{
|
|
vkd3d_mutex_lock(&e->mutex);
|
|
@@ -2533,6 +2570,9 @@ static ULONG STDMETHODCALLTYPE d3d12_command_list_Release(ID3D12GraphicsCommandL
|
|
vkd3d_pipeline_bindings_cleanup(&list->pipeline_bindings[VKD3D_PIPELINE_BIND_POINT_COMPUTE]);
|
|
vkd3d_pipeline_bindings_cleanup(&list->pipeline_bindings[VKD3D_PIPELINE_BIND_POINT_GRAPHICS]);
|
|
|
|
+ vkd3d_resource_list_cleanup(&list->rtv_resources_since_last_barrier);
|
|
+ vkd3d_resource_list_cleanup(&list->dsv_resources_since_last_barrier);
|
|
+
|
|
vkd3d_free(list);
|
|
|
|
d3d12_device_release(device);
|
|
@@ -2659,6 +2699,10 @@ static void d3d12_command_list_reset_state(struct d3d12_command_list *list,
|
|
list->fb_width = 0;
|
|
list->fb_height = 0;
|
|
list->fb_layer_count = 0;
|
|
+ memset(list->rtv_resources, 0, sizeof(list->rtv_resources));
|
|
+ list->dsv_resource = NULL;
|
|
+ vkd3d_resource_list_clear(&list->rtv_resources_since_last_barrier);
|
|
+ vkd3d_resource_list_clear(&list->dsv_resources_since_last_barrier);
|
|
|
|
list->xfb_enabled = false;
|
|
list->has_depth_bounds = false;
|
|
@@ -3469,6 +3513,82 @@ static bool d3d12_command_list_update_compute_state(struct d3d12_command_list *l
|
|
return true;
|
|
}
|
|
|
|
+/* Add a barrier to prevent hazards between multiple render passes to the same image. */
|
|
+static void d3d12_command_list_emit_rt_barrier(struct d3d12_command_list *list, bool colour, bool depth)
|
|
+{
|
|
+ const struct vkd3d_vk_device_procs *vk_procs = &list->device->vk_procs;
|
|
+ VkMemoryBarrier barrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER };
|
|
+ VkPipelineStageFlags srcStage = 0;
|
|
+ VkPipelineStageFlags dstStage = 0;
|
|
+
|
|
+ if (colour)
|
|
+ {
|
|
+ srcStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
|
|
+ dstStage |= VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
|
|
+ barrier.srcAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
|
|
+ barrier.dstAccessMask |= VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT;
|
|
+ }
|
|
+
|
|
+ if (depth)
|
|
+ {
|
|
+ srcStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
|
|
+ dstStage |= VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
|
|
+ barrier.srcAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
|
|
+ barrier.dstAccessMask |= VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
|
|
+ | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
|
|
+ }
|
|
+
|
|
+ VK_CALL(vkCmdPipelineBarrier(list->vk_command_buffer, srcStage, dstStage,
|
|
+ VK_DEPENDENCY_BY_REGION_BIT, 1, &barrier, 0, NULL, 0, NULL));
|
|
+ if (colour)
|
|
+ vkd3d_resource_list_clear(&list->rtv_resources_since_last_barrier);
|
|
+ if (depth)
|
|
+ vkd3d_resource_list_clear(&list->rtv_resources_since_last_barrier);
|
|
+}
|
|
+
|
|
+static void d3d12_command_list_check_render_pass_hazards(struct d3d12_command_list *list)
|
|
+{
|
|
+ struct d3d12_graphics_pipeline_state *graphics = &list->state->u.graphics;
|
|
+ bool rtv_hazard = false;
|
|
+ bool dsv_hazard = false;
|
|
+ unsigned int i;
|
|
+
|
|
+ for (i = 0; i < graphics->rt_count; ++i)
|
|
+ {
|
|
+ if (graphics->null_attachment_mask & (1u << i))
|
|
+ continue;
|
|
+
|
|
+ if (!list->rtv_resources[i])
|
|
+ continue;
|
|
+
|
|
+ if (vkd3d_resource_list_contains(&list->rtv_resources_since_last_barrier, list->rtv_resources[i]))
|
|
+ {
|
|
+ rtv_hazard = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dsv_hazard = d3d12_command_list_has_depth_stencil_view(list) && list->dsv_resource
|
|
+ && vkd3d_resource_list_contains(&list->dsv_resources_since_last_barrier, list->dsv_resource);
|
|
+
|
|
+ if (rtv_hazard || dsv_hazard)
|
|
+ d3d12_command_list_emit_rt_barrier(list, rtv_hazard, dsv_hazard);
|
|
+
|
|
+ for (i = 0; i < graphics->rt_count; ++i)
|
|
+ {
|
|
+ if (graphics->null_attachment_mask & (1u << i))
|
|
+ continue;
|
|
+
|
|
+ if (!list->rtv_resources[i])
|
|
+ continue;
|
|
+
|
|
+ vkd3d_resource_list_append(&list->rtv_resources_since_last_barrier, list->rtv_resources[i]);
|
|
+ }
|
|
+
|
|
+ if (d3d12_command_list_has_depth_stencil_view(list) && list->dsv_resource)
|
|
+ vkd3d_resource_list_append(&list->dsv_resources_since_last_barrier, list->dsv_resource);
|
|
+}
|
|
+
|
|
static bool d3d12_command_list_begin_render_pass(struct d3d12_command_list *list)
|
|
{
|
|
const struct vkd3d_vk_device_procs *vk_procs = &list->device->vk_procs;
|
|
@@ -3486,6 +3606,8 @@ static bool d3d12_command_list_begin_render_pass(struct d3d12_command_list *list
|
|
if (list->current_render_pass != VK_NULL_HANDLE)
|
|
return true;
|
|
|
|
+ d3d12_command_list_check_render_pass_hazards(list);
|
|
+
|
|
vk_render_pass = list->pso_render_pass;
|
|
VKD3D_ASSERT(vk_render_pass);
|
|
|
|
@@ -5137,6 +5259,7 @@ static void STDMETHODCALLTYPE d3d12_command_list_OMSetRenderTargets(ID3D12Graphi
|
|
{
|
|
WARN("RTV descriptor %u is not initialized.\n", i);
|
|
list->rtvs[i] = VK_NULL_HANDLE;
|
|
+ list->rtv_resources[i] = NULL;
|
|
continue;
|
|
}
|
|
|
|
@@ -5150,6 +5273,7 @@ static void STDMETHODCALLTYPE d3d12_command_list_OMSetRenderTargets(ID3D12Graphi
|
|
}
|
|
|
|
list->rtvs[i] = view->v.u.vk_image_view;
|
|
+ list->rtv_resources[i] = rtv_desc->resource;
|
|
list->fb_width = max(list->fb_width, rtv_desc->width);
|
|
list->fb_height = max(list->fb_height, rtv_desc->height);
|
|
list->fb_layer_count = max(list->fb_layer_count, rtv_desc->layer_count);
|
|
@@ -5171,9 +5295,11 @@ static void STDMETHODCALLTYPE d3d12_command_list_OMSetRenderTargets(ID3D12Graphi
|
|
{
|
|
WARN("Failed to add view.\n");
|
|
list->dsv = VK_NULL_HANDLE;
|
|
+ list->dsv_resource = NULL;
|
|
}
|
|
|
|
list->dsv = view->v.u.vk_image_view;
|
|
+ list->dsv_resource = dsv_desc->resource;
|
|
list->fb_width = max(list->fb_width, dsv_desc->width);
|
|
list->fb_height = max(list->fb_height, dsv_desc->height);
|
|
list->fb_layer_count = max(list->fb_layer_count, dsv_desc->layer_count);
|
|
@@ -5209,8 +5335,6 @@ static void d3d12_command_list_clear(struct d3d12_command_list *list,
|
|
unsigned int i;
|
|
VkResult vr;
|
|
|
|
- d3d12_command_list_end_current_render_pass(list);
|
|
-
|
|
if (!rect_count)
|
|
{
|
|
full_rect.top = 0;
|
|
@@ -5344,6 +5468,12 @@ static void STDMETHODCALLTYPE d3d12_command_list_ClearDepthStencilView(ID3D12Gra
|
|
ds_reference.attachment = 0;
|
|
ds_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
|
|
|
|
+ d3d12_command_list_end_current_render_pass(list);
|
|
+
|
|
+ if (vkd3d_resource_list_contains(&list->dsv_resources_since_last_barrier, dsv_desc->resource))
|
|
+ d3d12_command_list_emit_rt_barrier(list, false, true);
|
|
+ vkd3d_resource_list_append(&list->dsv_resources_since_last_barrier, dsv_desc->resource);
|
|
+
|
|
d3d12_command_list_clear(list, &attachment_desc, NULL, &ds_reference,
|
|
dsv_desc->view, dsv_desc->width, dsv_desc->height, dsv_desc->layer_count,
|
|
&clear_value, rect_count, rects);
|
|
@@ -5398,6 +5528,12 @@ static void STDMETHODCALLTYPE d3d12_command_list_ClearRenderTargetView(ID3D12Gra
|
|
clear_value.color.float32[3] = color[3];
|
|
}
|
|
|
|
+ d3d12_command_list_end_current_render_pass(list);
|
|
+
|
|
+ if (vkd3d_resource_list_contains(&list->rtv_resources_since_last_barrier, rtv_desc->resource))
|
|
+ d3d12_command_list_emit_rt_barrier(list, true, false);
|
|
+ vkd3d_resource_list_append(&list->rtv_resources_since_last_barrier, rtv_desc->resource);
|
|
+
|
|
d3d12_command_list_clear(list, &attachment_desc, &color_reference, NULL,
|
|
rtv_desc->view, rtv_desc->width, rtv_desc->height, rtv_desc->layer_count,
|
|
&clear_value, rect_count, rects);
|
|
@@ -6395,6 +6531,9 @@ static HRESULT d3d12_command_list_init(struct d3d12_command_list *list,
|
|
|
|
list->type = type;
|
|
|
|
+ vkd3d_resource_list_init(&list->rtv_resources_since_last_barrier);
|
|
+ vkd3d_resource_list_init(&list->dsv_resources_since_last_barrier);
|
|
+
|
|
if (FAILED(hr = vkd3d_private_store_init(&list->private_store)))
|
|
return hr;
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d/device.c b/libs/vkd3d/libs/vkd3d/device.c
|
|
index 6af5e2a5c7d..170ee7fe5aa 100644
|
|
--- a/libs/vkd3d/libs/vkd3d/device.c
|
|
+++ b/libs/vkd3d/libs/vkd3d/device.c
|
|
@@ -109,6 +109,7 @@ static const struct vkd3d_optional_extension_info optional_device_extensions[] =
|
|
VK_EXTENSION(EXT_FRAGMENT_SHADER_INTERLOCK, EXT_fragment_shader_interlock),
|
|
VK_EXTENSION(EXT_MUTABLE_DESCRIPTOR_TYPE, EXT_mutable_descriptor_type),
|
|
VK_EXTENSION(EXT_ROBUSTNESS_2, EXT_robustness2),
|
|
+ VK_EXTENSION(EXT_SAMPLER_FILTER_MINMAX, EXT_sampler_filter_minmax),
|
|
VK_EXTENSION(EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION, EXT_shader_demote_to_helper_invocation),
|
|
VK_EXTENSION(EXT_SHADER_STENCIL_EXPORT, EXT_shader_stencil_export),
|
|
VK_EXTENSION(EXT_SHADER_VIEWPORT_INDEX_LAYER, EXT_shader_viewport_index_layer),
|
|
@@ -232,18 +233,18 @@ static HRESULT vkd3d_vk_descriptor_heap_layouts_init(struct d3d12_device *device
|
|
switch (device->vk_descriptor_heap_layouts[set].type)
|
|
{
|
|
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
|
|
- device->vk_descriptor_heap_layouts[set].count = limits->uniform_buffer_max_descriptors;
|
|
+ device->vk_descriptor_heap_layouts[set].count = limits->max_cbv_descriptor_count;
|
|
break;
|
|
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
|
|
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
|
|
- device->vk_descriptor_heap_layouts[set].count = limits->sampled_image_max_descriptors;
|
|
+ device->vk_descriptor_heap_layouts[set].count = limits->max_srv_descriptor_count;
|
|
break;
|
|
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
|
|
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
|
|
- device->vk_descriptor_heap_layouts[set].count = limits->storage_image_max_descriptors;
|
|
+ device->vk_descriptor_heap_layouts[set].count = limits->max_uav_descriptor_count;
|
|
break;
|
|
case VK_DESCRIPTOR_TYPE_SAMPLER:
|
|
- device->vk_descriptor_heap_layouts[set].count = limits->sampler_max_descriptors;
|
|
+ device->vk_descriptor_heap_layouts[set].count = limits->max_sampler_descriptor_count;
|
|
break;
|
|
default:
|
|
ERR("Unhandled descriptor type %#x.\n", device->vk_descriptor_heap_layouts[set].type);
|
|
@@ -835,6 +836,7 @@ struct vkd3d_physical_device_info
|
|
/* properties */
|
|
VkPhysicalDeviceDescriptorIndexingPropertiesEXT descriptor_indexing_properties;
|
|
VkPhysicalDeviceMaintenance3Properties maintenance3_properties;
|
|
+ VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT filter_minmax_properties;
|
|
VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT texel_buffer_alignment_properties;
|
|
VkPhysicalDeviceTransformFeedbackPropertiesEXT xfb_properties;
|
|
VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT vertex_divisor_properties;
|
|
@@ -900,6 +902,8 @@ static void vkd3d_chain_physical_device_info_structures(struct vkd3d_physical_de
|
|
vk_prepend_struct(&info->properties2, &info->maintenance3_properties);
|
|
if (vulkan_info->EXT_descriptor_indexing)
|
|
vk_prepend_struct(&info->properties2, &info->descriptor_indexing_properties);
|
|
+ if (vulkan_info->EXT_sampler_filter_minmax)
|
|
+ vk_prepend_struct(&info->properties2, &info->filter_minmax_properties);
|
|
if (vulkan_info->EXT_texel_buffer_alignment)
|
|
vk_prepend_struct(&info->properties2, &info->texel_buffer_alignment_properties);
|
|
if (vulkan_info->EXT_transform_feedback)
|
|
@@ -936,6 +940,7 @@ static void vkd3d_physical_device_info_init(struct vkd3d_physical_device_info *i
|
|
info->properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
|
|
info->maintenance3_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES;
|
|
info->descriptor_indexing_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT;
|
|
+ info->filter_minmax_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT;
|
|
info->texel_buffer_alignment_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT;
|
|
info->xfb_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT;
|
|
info->vertex_divisor_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT;
|
|
@@ -1017,6 +1022,7 @@ static void vkd3d_trace_physical_device_limits(const struct vkd3d_physical_devic
|
|
const VkPhysicalDeviceLimits *limits = &info->properties2.properties.limits;
|
|
const VkPhysicalDeviceDescriptorIndexingPropertiesEXT *descriptor_indexing;
|
|
const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT *buffer_alignment;
|
|
+ const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *minmax;
|
|
const VkPhysicalDeviceMaintenance3Properties *maintenance3;
|
|
const VkPhysicalDeviceTransformFeedbackPropertiesEXT *xfb;
|
|
|
|
@@ -1196,6 +1202,11 @@ static void vkd3d_trace_physical_device_limits(const struct vkd3d_physical_devic
|
|
TRACE(" maxPerSetDescriptors: %u.\n", maintenance3->maxPerSetDescriptors);
|
|
TRACE(" maxMemoryAllocationSize: %#"PRIx64".\n", maintenance3->maxMemoryAllocationSize);
|
|
|
|
+ minmax = &info->filter_minmax_properties;
|
|
+ TRACE(" VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT:\n");
|
|
+ TRACE(" filterMinmaxSingleComponentFormats: %#x.\n", minmax->filterMinmaxSingleComponentFormats);
|
|
+ TRACE(" filterMinmaxImageComponentMapping: %#x.\n", minmax->filterMinmaxImageComponentMapping);
|
|
+
|
|
buffer_alignment = &info->texel_buffer_alignment_properties;
|
|
TRACE(" VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT:\n");
|
|
TRACE(" storageTexelBufferOffsetAlignmentBytes: %#"PRIx64".\n",
|
|
@@ -1476,11 +1487,11 @@ static void vkd3d_init_feature_level(struct vkd3d_vulkan_info *vk_info,
|
|
static void vkd3d_device_descriptor_limits_init(struct vkd3d_device_descriptor_limits *limits,
|
|
const VkPhysicalDeviceLimits *device_limits)
|
|
{
|
|
- limits->uniform_buffer_max_descriptors = device_limits->maxDescriptorSetUniformBuffers;
|
|
- limits->sampled_image_max_descriptors = device_limits->maxDescriptorSetSampledImages;
|
|
- limits->storage_buffer_max_descriptors = device_limits->maxDescriptorSetStorageBuffers;
|
|
- limits->storage_image_max_descriptors = device_limits->maxDescriptorSetStorageImages;
|
|
- limits->sampler_max_descriptors = min(device_limits->maxDescriptorSetSamplers, VKD3D_MAX_DESCRIPTOR_SET_SAMPLERS);
|
|
+ limits->max_cbv_descriptor_count = device_limits->maxDescriptorSetUniformBuffers;
|
|
+ limits->max_srv_descriptor_count = device_limits->maxDescriptorSetSampledImages;
|
|
+ limits->max_uav_descriptor_count = device_limits->maxDescriptorSetStorageImages;
|
|
+ limits->max_sampler_descriptor_count = min(device_limits->maxDescriptorSetSamplers,
|
|
+ VKD3D_MAX_DESCRIPTOR_SET_SAMPLERS);
|
|
}
|
|
|
|
static void vkd3d_device_vk_heaps_descriptor_limits_init(struct vkd3d_device_descriptor_limits *limits,
|
|
@@ -1500,22 +1511,19 @@ static void vkd3d_device_vk_heaps_descriptor_limits_init(struct vkd3d_device_des
|
|
uav_divisor = properties->maxDescriptorSetUpdateAfterBindSampledImages >= (3u << 20) ? 3 : 2;
|
|
}
|
|
|
|
- limits->uniform_buffer_max_descriptors = min(min(properties->maxDescriptorSetUpdateAfterBindUniformBuffers,
|
|
+ limits->max_cbv_descriptor_count = min(min(properties->maxDescriptorSetUpdateAfterBindUniformBuffers,
|
|
properties->maxPerStageDescriptorUpdateAfterBindUniformBuffers - root_provision),
|
|
VKD3D_MAX_DESCRIPTOR_SET_CBVS_SRVS_UAVS);
|
|
- limits->sampled_image_max_descriptors = min(min(properties->maxDescriptorSetUpdateAfterBindSampledImages,
|
|
+ limits->max_srv_descriptor_count = min(min(properties->maxDescriptorSetUpdateAfterBindSampledImages,
|
|
properties->maxPerStageDescriptorUpdateAfterBindSampledImages / srv_divisor - root_provision),
|
|
VKD3D_MAX_DESCRIPTOR_SET_CBVS_SRVS_UAVS);
|
|
- limits->storage_buffer_max_descriptors = min(min(properties->maxDescriptorSetUpdateAfterBindStorageBuffers,
|
|
- properties->maxPerStageDescriptorUpdateAfterBindStorageBuffers - root_provision),
|
|
- VKD3D_MAX_DESCRIPTOR_SET_CBVS_SRVS_UAVS);
|
|
- limits->storage_image_max_descriptors = min(min(properties->maxDescriptorSetUpdateAfterBindStorageImages,
|
|
+ limits->max_uav_descriptor_count = min(min(properties->maxDescriptorSetUpdateAfterBindStorageImages,
|
|
properties->maxPerStageDescriptorUpdateAfterBindStorageImages / uav_divisor - root_provision),
|
|
VKD3D_MAX_DESCRIPTOR_SET_CBVS_SRVS_UAVS);
|
|
- limits->sampler_max_descriptors = min(min(properties->maxDescriptorSetUpdateAfterBindSamplers,
|
|
+ limits->max_sampler_descriptor_count = min(min(properties->maxDescriptorSetUpdateAfterBindSamplers,
|
|
properties->maxPerStageDescriptorUpdateAfterBindSamplers - root_provision),
|
|
VKD3D_MAX_DESCRIPTOR_SET_CBVS_SRVS_UAVS);
|
|
- limits->sampler_max_descriptors = min(limits->sampler_max_descriptors, VKD3D_MAX_DESCRIPTOR_SET_SAMPLERS);
|
|
+ limits->max_sampler_descriptor_count = min(limits->max_sampler_descriptor_count, VKD3D_MAX_DESCRIPTOR_SET_SAMPLERS);
|
|
}
|
|
|
|
static bool d3d12_device_supports_typed_uav_load_additional_formats(const struct d3d12_device *device)
|
|
@@ -1866,6 +1874,12 @@ static HRESULT vkd3d_init_device_caps(struct d3d12_device *device,
|
|
|
|
physical_device_info->formats4444_features.formatA4B4G4R4 = VK_FALSE;
|
|
|
|
+ if (!vulkan_info->EXT_sampler_filter_minmax)
|
|
+ WARN("Sampler min/max reduction filtering is not supported.\n");
|
|
+ else if (!physical_device_info->filter_minmax_properties.filterMinmaxSingleComponentFormats
|
|
+ || !physical_device_info->filter_minmax_properties.filterMinmaxImageComponentMapping)
|
|
+ WARN("Sampler min/max reduction filtering is only partially supported.");
|
|
+
|
|
vulkan_info->texel_buffer_alignment_properties = physical_device_info->texel_buffer_alignment_properties;
|
|
|
|
if (get_spec_version(vk_extensions, vk_extension_count, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME) >= 3)
|
|
@@ -2713,13 +2727,13 @@ static void device_init_descriptor_pool_sizes(struct d3d12_device *device)
|
|
const struct vkd3d_device_descriptor_limits *limits = &device->vk_info.descriptor_limits;
|
|
unsigned int *pool_sizes = device->vk_pool_limits;
|
|
|
|
- pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_CBV] = min(limits->uniform_buffer_max_descriptors,
|
|
+ pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_CBV] = min(limits->max_cbv_descriptor_count,
|
|
VKD3D_MAX_VIRTUAL_HEAP_DESCRIPTORS_PER_TYPE);
|
|
- pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_SRV] = min(limits->sampled_image_max_descriptors,
|
|
+ pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_SRV] = min(limits->max_srv_descriptor_count,
|
|
VKD3D_MAX_VIRTUAL_HEAP_DESCRIPTORS_PER_TYPE);
|
|
- pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_UAV] = min(limits->storage_image_max_descriptors,
|
|
+ pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_UAV] = min(limits->max_uav_descriptor_count,
|
|
VKD3D_MAX_VIRTUAL_HEAP_DESCRIPTORS_PER_TYPE);
|
|
- pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER] = min(limits->sampler_max_descriptors,
|
|
+ pool_sizes[VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER] = min(limits->max_sampler_descriptor_count,
|
|
VKD3D_MAX_VIRTUAL_HEAP_DESCRIPTORS_PER_TYPE);
|
|
};
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d/resource.c b/libs/vkd3d/libs/vkd3d/resource.c
|
|
index 7946445ad07..f1491cbc2b6 100644
|
|
--- a/libs/vkd3d/libs/vkd3d/resource.c
|
|
+++ b/libs/vkd3d/libs/vkd3d/resource.c
|
|
@@ -3661,6 +3661,24 @@ bool vkd3d_create_raw_buffer_view(struct d3d12_device *device,
|
|
}
|
|
|
|
/* samplers */
|
|
+
|
|
+static VkSamplerReductionModeEXT vk_reduction_mode_from_d3d12(D3D12_FILTER_REDUCTION_TYPE mode)
|
|
+{
|
|
+ switch (mode)
|
|
+ {
|
|
+ case D3D12_FILTER_REDUCTION_TYPE_STANDARD:
|
|
+ case D3D12_FILTER_REDUCTION_TYPE_COMPARISON:
|
|
+ return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE;
|
|
+ case D3D12_FILTER_REDUCTION_TYPE_MINIMUM:
|
|
+ return VK_SAMPLER_REDUCTION_MODE_MIN;
|
|
+ case D3D12_FILTER_REDUCTION_TYPE_MAXIMUM:
|
|
+ return VK_SAMPLER_REDUCTION_MODE_MAX;
|
|
+ default:
|
|
+ FIXME("Unhandled reduction mode %#x.\n", mode);
|
|
+ return VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE;
|
|
+ }
|
|
+}
|
|
+
|
|
static VkFilter vk_filter_from_d3d12(D3D12_FILTER_TYPE type)
|
|
{
|
|
switch (type)
|
|
@@ -3734,16 +3752,13 @@ static VkResult d3d12_create_sampler(struct d3d12_device *device, D3D12_FILTER f
|
|
D3D12_COMPARISON_FUNC comparison_func, D3D12_STATIC_BORDER_COLOR border_colour,
|
|
float min_lod, float max_lod, VkSampler *vk_sampler)
|
|
{
|
|
+ VkSamplerReductionModeCreateInfoEXT reduction_desc;
|
|
const struct vkd3d_vk_device_procs *vk_procs;
|
|
struct VkSamplerCreateInfo sampler_desc;
|
|
VkResult vr;
|
|
|
|
vk_procs = &device->vk_procs;
|
|
|
|
- if (D3D12_DECODE_FILTER_REDUCTION(filter) == D3D12_FILTER_REDUCTION_TYPE_MINIMUM
|
|
- || D3D12_DECODE_FILTER_REDUCTION(filter) == D3D12_FILTER_REDUCTION_TYPE_MAXIMUM)
|
|
- FIXME("Min/max reduction mode not supported.\n");
|
|
-
|
|
sampler_desc.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
|
|
sampler_desc.pNext = NULL;
|
|
sampler_desc.flags = 0;
|
|
@@ -3767,6 +3782,21 @@ static VkResult d3d12_create_sampler(struct d3d12_device *device, D3D12_FILTER f
|
|
|| address_w == D3D12_TEXTURE_ADDRESS_MODE_BORDER)
|
|
sampler_desc.borderColor = vk_border_colour_from_d3d12(border_colour);
|
|
|
|
+ reduction_desc.reductionMode = vk_reduction_mode_from_d3d12(D3D12_DECODE_FILTER_REDUCTION(filter));
|
|
+ if (reduction_desc.reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE)
|
|
+ {
|
|
+ if (device->vk_info.EXT_sampler_filter_minmax)
|
|
+ {
|
|
+ reduction_desc.sType = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT;
|
|
+ reduction_desc.pNext = NULL;
|
|
+ vk_prepend_struct(&sampler_desc, &reduction_desc);
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ FIXME("Sampler min/max reduction filtering is not supported by the device.\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
if ((vr = VK_CALL(vkCreateSampler(device->vk_device, &sampler_desc, NULL, vk_sampler))) < 0)
|
|
WARN("Failed to create Vulkan sampler, vr %d.\n", vr);
|
|
|
|
diff --git a/libs/vkd3d/libs/vkd3d/state.c b/libs/vkd3d/libs/vkd3d/state.c
|
|
index 4bd97fd599f..d733165312c 100644
|
|
--- a/libs/vkd3d/libs/vkd3d/state.c
|
|
+++ b/libs/vkd3d/libs/vkd3d/state.c
|
|
@@ -938,19 +938,19 @@ static unsigned int vk_binding_count_from_descriptor_range(const struct d3d12_ro
|
|
switch (range->type)
|
|
{
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_CBV:
|
|
- limit = limits->uniform_buffer_max_descriptors;
|
|
+ limit = limits->max_cbv_descriptor_count;
|
|
count = (limit - min(info->cbv_count, limit)) / info->cbv_unbounded_range_count;
|
|
break;
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_SRV:
|
|
- limit = limits->sampled_image_max_descriptors;
|
|
+ limit = limits->max_srv_descriptor_count;
|
|
count = (limit - min(info->srv_count, limit)) / info->srv_unbounded_range_count;
|
|
break;
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_UAV:
|
|
- limit = limits->storage_image_max_descriptors;
|
|
+ limit = limits->max_uav_descriptor_count;
|
|
count = (limit - min(info->uav_count, limit)) / info->uav_unbounded_range_count;
|
|
break;
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER:
|
|
- limit = limits->sampler_max_descriptors;
|
|
+ limit = limits->max_sampler_descriptor_count;
|
|
count = (limit - min(info->sampler_count, limit)) / info->sampler_unbounded_range_count;
|
|
break;
|
|
default:
|
|
@@ -1084,36 +1084,36 @@ static void vkd3d_descriptor_heap_binding_from_descriptor_range(const struct d3d
|
|
if (range->type == VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER)
|
|
{
|
|
binding->set = VKD3D_SET_INDEX_SAMPLER;
|
|
- descriptor_set_size = descriptor_limits->sampler_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_sampler_descriptor_count;
|
|
}
|
|
else
|
|
{
|
|
binding->set = VKD3D_SET_INDEX_MUTABLE;
|
|
- descriptor_set_size = descriptor_limits->sampled_image_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_srv_descriptor_count;
|
|
}
|
|
}
|
|
else switch (range->type)
|
|
{
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_SRV:
|
|
binding->set = is_buffer ? VKD3D_SET_INDEX_UNIFORM_TEXEL_BUFFER : VKD3D_SET_INDEX_SAMPLED_IMAGE;
|
|
- descriptor_set_size = descriptor_limits->sampled_image_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_srv_descriptor_count;
|
|
break;
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_UAV:
|
|
binding->set = is_buffer ? VKD3D_SET_INDEX_STORAGE_TEXEL_BUFFER : VKD3D_SET_INDEX_STORAGE_IMAGE;
|
|
- descriptor_set_size = descriptor_limits->storage_image_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_uav_descriptor_count;
|
|
break;
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_CBV:
|
|
binding->set = VKD3D_SET_INDEX_UNIFORM_BUFFER;
|
|
- descriptor_set_size = descriptor_limits->uniform_buffer_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_cbv_descriptor_count;
|
|
break;
|
|
case VKD3D_SHADER_DESCRIPTOR_TYPE_SAMPLER:
|
|
binding->set = VKD3D_SET_INDEX_SAMPLER;
|
|
- descriptor_set_size = descriptor_limits->sampler_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_sampler_descriptor_count;
|
|
break;
|
|
default:
|
|
FIXME("Unhandled descriptor range type type %#x.\n", range->type);
|
|
binding->set = VKD3D_SET_INDEX_SAMPLED_IMAGE;
|
|
- descriptor_set_size = descriptor_limits->sampled_image_max_descriptors;
|
|
+ descriptor_set_size = descriptor_limits->max_srv_descriptor_count;
|
|
break;
|
|
}
|
|
binding->set += root_signature->vk_set_count;
|
|
@@ -1151,7 +1151,7 @@ static void d3d12_root_signature_map_vk_heap_uav_counter(struct d3d12_root_signa
|
|
mapping->binding.set = root_signature->vk_set_count + VKD3D_SET_INDEX_UAV_COUNTER;
|
|
mapping->binding.binding = 0;
|
|
mapping->binding.count = vk_heap_binding_count_from_descriptor_range(range,
|
|
- root_signature->device->vk_info.descriptor_limits.storage_image_max_descriptors);
|
|
+ root_signature->device->vk_info.descriptor_limits.max_uav_descriptor_count);
|
|
offset->static_offset = range->offset;
|
|
offset->dynamic_offset_index = context->push_constant_index;
|
|
}
|
|
diff --git a/libs/vkd3d/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
|
|
index 0a8c5aef674..f2e78503302 100644
|
|
--- a/libs/vkd3d/libs/vkd3d/vkd3d_private.h
|
|
+++ b/libs/vkd3d/libs/vkd3d/vkd3d_private.h
|
|
@@ -108,11 +108,10 @@ HRESULT hresult_from_vkd3d_result(int vkd3d_result);
|
|
|
|
struct vkd3d_device_descriptor_limits
|
|
{
|
|
- unsigned int uniform_buffer_max_descriptors;
|
|
- unsigned int sampled_image_max_descriptors;
|
|
- unsigned int storage_buffer_max_descriptors;
|
|
- unsigned int storage_image_max_descriptors;
|
|
- unsigned int sampler_max_descriptors;
|
|
+ unsigned int max_cbv_descriptor_count;
|
|
+ unsigned int max_srv_descriptor_count;
|
|
+ unsigned int max_uav_descriptor_count;
|
|
+ unsigned int max_sampler_descriptor_count;
|
|
};
|
|
|
|
struct vkd3d_vulkan_info
|
|
@@ -144,6 +143,7 @@ struct vkd3d_vulkan_info
|
|
bool EXT_fragment_shader_interlock;
|
|
bool EXT_mutable_descriptor_type;
|
|
bool EXT_robustness2;
|
|
+ bool EXT_sampler_filter_minmax;
|
|
bool EXT_shader_demote_to_helper_invocation;
|
|
bool EXT_shader_stencil_export;
|
|
bool EXT_shader_viewport_index_layer;
|
|
@@ -1277,6 +1277,13 @@ enum vkd3d_pipeline_bind_point
|
|
VKD3D_PIPELINE_BIND_POINT_COUNT = 0x2,
|
|
};
|
|
|
|
+struct vkd3d_resource_list
|
|
+{
|
|
+ struct d3d12_resource **resources;
|
|
+ size_t count;
|
|
+ size_t capacity;
|
|
+};
|
|
+
|
|
/* ID3D12CommandList */
|
|
struct d3d12_command_list
|
|
{
|
|
@@ -1302,6 +1309,13 @@ struct d3d12_command_list
|
|
unsigned int fb_layer_count;
|
|
VkFormat dsv_format;
|
|
|
|
+ /* Resources for views bound to d3d12 state */
|
|
+ struct d3d12_resource *rtv_resources[D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT];
|
|
+ struct d3d12_resource *dsv_resource;
|
|
+ /* Resources bound since the last pipeline barrier */
|
|
+ struct vkd3d_resource_list rtv_resources_since_last_barrier;
|
|
+ struct vkd3d_resource_list dsv_resources_since_last_barrier;
|
|
+
|
|
bool xfb_enabled;
|
|
bool has_depth_bounds;
|
|
bool is_predicated;
|
|
--
|
|
2.51.0
|
|
|