diff --git a/tests/hlsl/minimum-precision.shader_test b/tests/hlsl/minimum-precision.shader_test
index 11c64353a..7111f9e04 100644
--- a/tests/hlsl/minimum-precision.shader_test
+++ b/tests/hlsl/minimum-precision.shader_test
@@ -24,56 +24,6 @@ probe (0, 0) rgba (197.0, 218.0, 238.0, 257.0)
 format r32g32b32a32-uint
 size (2d, 640, 480)
 
-[pixel shader]
-uniform min16uint2 u;
-
-uint4 main() : sv_target
-{
-    min16uint i = 0x7fff, j = 0xffff;
-    return uint4(u.x + i, u.y + j, 0, 0);
-}
-
-[require]
-shader model >= 4.0
-shader model < 6.0
-
-[test]
-uniform 0 uint4 0 0 0 0
-todo(msl) draw quad
-probe (0, 0) rgbaui (0x7fff, 0xffff, 0, 0)
-
-[require]
-shader model >= 6.0
-
-[test]
-uniform 0 uint4 0 0 0 0
-draw quad
-probe (0, 0) rgbaui (0x7fff, 0xffffffff, 0, 0)
-
-
-% The code d3dcompiler_47 produces for this appears correct, but the result
-% is still zero in Windows.
-
-[pixel shader]
-uniform min16uint4 u;
-uniform uint i;
-
-uint4 main() : sv_target
-{
-    min16uint arr[4] = {1, 2, 0x7fff, 0xffff};
-    return uint4(u.x + arr[i], u.y + arr[i + 1], 0, 0);
-}
-
-[test]
-uniform 0 uint4 0 0 0 0
-uniform 4 uint 2
-draw quad
-probe (0, 0) rgbaui (0x7fff, 0xffffffff, 0, 0)
-uniform 0 uint4 0 0 0 0
-uniform 4 uint 0
-draw quad
-probe (0, 0) rgbaui (1, 2, 0, 0)
-
 % In SM4-5 minimum precision integers in constant buffers are treated just like
 % their 32-bit counterparts.