[require]
shader model >= 4.0
% vkd3d-shader/hlsl: Reinterpret minimum precision types as their regular
% counterparts.
%
% Reinterpret min16float, min10float, min16int, min12int, and min16uint
% as their regular counterparts: float, float, int, int, uint,
% respectively.
%
% A proper implementation would require adding minimum precision
% indicators to all the dxbc-tpf instructions that use these types.
% Consider the output of fxc 10.1 with the following shader:
%
%     uniform int i;
%
%     float4 main() : sv_target
%     {
%         min16float4 a = {0, 1, 2, i};
%         min16int2 b = {4, i};
%         min10float3 c = {6.4, 7, i};
%         min12int d = 9.4;
%         min16uint4x2 e = {14.4, 15, 16, 17, 18, 19, 20, i};
%         return mul(e, b) + a + c.xyzx + d;
%     }
%
% However, if the graphics driver doesn't have minimum precision support,
% it ignores the minimum precision indicators and runs at 32-bit
% precision, which is equivalent to working with regular types.
[pixel shader]
// Every minimum precision type is expected to behave exactly like its
// regular 32-bit counterpart: min16float/min10float as float,
// min16int/min12int as int, min16uint as uint.
float4 main() : sv_target
{
    min16float4 a = {0, 1, 2, 3};
    min10float2 b = {4, 5};
    min16int3 c = {6.4, 7, 8};    // 6.4 truncates to 6 on conversion to int
    min12int d = 9.4;             // truncates to 9
    min16uint4x2 e = {14.4, 15, 16, 17, 18, 19, 20, 21};
    vector<min10float, 3> g = {22, 23, 24};
    matrix<min16uint, 3, 2> h = {25.4, 26, 27, 28, 29, 30};

    // mul(e, b) = {131, 149, 167, 185}; with the remaining terms this
    // adds up to the probed value (197.0, 218.0, 238.0, 257.0).
    return mul(e, b) + a + c.xyzx + d + g.xxyz + h[2].xyxy;
}
[test]
draw quad
probe all rgba (197.0, 218.0, 238.0, 257.0)

[pixel shader]
// 'u' is set to zero by the [test] section below; the probe therefore
// checks that min16uint literals keep their full 32-bit values (no
// 16-bit truncation of 0x7fff / 0xffff).
uniform min16uint2 u;

uint4 main() : sv_target
{
    min16uint i = 0x7fff, j = 0xffff;
    return uint4(u.x + i, u.y + j, 0, 0);
}
[test]
uniform 0 uint4 0 0 0 0
draw quad
probe all rgbaui (0x7fff, 0xffff, 0, 0)

% The code d3dcompiler_47 produces for this appears correct, but the result
% is still zero in Windows.
[require]
shader model >= 6.0
[pixel shader]
// Index a min16uint array with a dynamic (uniform) index; the results
// must match plain uint behaviour.
uniform min16uint4 u;
uniform uint i;

uint4 main() : sv_target
{
    min16uint arr[4] = {1, 2, 0x7fff, 0xffff};
    // 'u' is zero in the [test] section, so the probes effectively read
    // arr[i] and arr[i + 1] directly.
    return uint4(u.x + arr[i], u.y + arr[i + 1], 0, 0);
}
[test]
uniform 0 uint4 0 0 0 0
uniform 4 uint 2
draw quad
probe all rgbaui (0x7fff, 0xffff, 0, 0)

uniform 0 uint4 0 0 0 0
uniform 4 uint 0
draw quad
probe all rgbaui (1, 2, 0, 0)