diff --git a/tests/d3d12.c b/tests/d3d12.c
index 329602378..dabed44cf 100644
--- a/tests/d3d12.c
+++ b/tests/d3d12.c
@@ -5669,10 +5669,6 @@ static void test_clear_unordered_access_view_buffer(void)
         {DXGI_FORMAT_R11G11B10_FLOAT, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
                 {0, 0, 0, 0}, 0},
-        {DXGI_FORMAT_R11G11B10_FLOAT, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
-                {0x7ff, 0x7ff, 0x3ff, 0}, 0xffffffff},
-        {DXGI_FORMAT_R11G11B10_FLOAT, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
-                {0x7ff, 0, 0x3ff, 0}, 0xffc007ff},
         {DXGI_FORMAT_R11G11B10_FLOAT, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
                 {0x3f000000 /* 0.5f */, 0x3f800000 /* 1.0f */, 0x40000000 /* 2.0f */, 0}, 0x801e0380, true},
         {DXGI_FORMAT_R11G11B10_FLOAT, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
@@ -5905,7 +5901,12 @@ static void test_clear_unordered_access_view_image(void)
         {DXGI_FORMAT_R16G16_FLOAT, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 4}, 0x00020001},
         {DXGI_FORMAT_R8G8B8A8_UINT, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 4}, 0x04030201},
         {DXGI_FORMAT_R8G8B8A8_UNORM, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 4}, 0x04030201},
-        {DXGI_FORMAT_R11G11B10_FLOAT, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 4}, 0x00c01001},
+        /* Implementations have completely different behaviours here: AMD and
+         * WARP just shift the inputs in the bitfields, NVIDIA interprets them
+         * as floating-point numbers and converts them to the
+         * partial-precision target formats. We retain the test to exercise
+         * the code path, but there is no meaningful semantic to check. */
+        {DXGI_FORMAT_R11G11B10_FLOAT, 1, 1, 0, 0, 1, 0, {}, {0, 0, 0, 0}, 0x00000000},
         {DXGI_FORMAT_B5G6R5_UNORM, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 4}, 0x00000843, false, false, true},
         {DXGI_FORMAT_B5G5R5A1_UNORM, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 1}, 0x00008443, false, false, true},
         {DXGI_FORMAT_B4G4R4A4_UNORM, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 1}, 0x00001123, false, false, true},
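
For reference, the standalone sketch below (not part of the patch) illustrates the two behaviours described in the new comment, assuming the usual DXGI_FORMAT_R11G11B10_FLOAT layout: red in bits 0-10 and green in bits 11-21 with 6-bit mantissas, blue in bits 22-31 with a 5-bit mantissa, all with 5-bit exponents biased by 15. The helper float_to_small_float() is hypothetical and only handles the normalised values used here; the checks reproduce the 0x801e0380 expectation kept in the buffer test and the 0x00c01001 expectation removed from the image test.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Convert a 32-bit float to an unsigned small float with the given mantissa
 * width. Only normalised values well inside the target range are handled,
 * which is enough for 0.5f, 1.0f and 2.0f. */
static uint32_t float_to_small_float(float f, unsigned int mantissa_bits)
{
    uint32_t bits;

    memcpy(&bits, &f, sizeof(bits));
    if (!(bits & 0x7fffffff))
        return 0;
    /* Rebias the exponent from 127 to 15 and truncate the mantissa. */
    return ((bits >> 23 & 0xff) - 127 + 15) << mantissa_bits
            | (bits >> (23 - mantissa_bits) & ((1u << mantissa_bits) - 1));
}

int main(void)
{
    uint32_t packed;

    /* "Float" behaviour (e.g. NVIDIA): the clear values are interpreted as
     * IEEE floats and converted to the partial-precision components. This
     * reproduces the 0x801e0380 expectation kept in the buffer test. */
    packed = float_to_small_float(0.5f, 6)
            | float_to_small_float(1.0f, 6) << 11
            | float_to_small_float(2.0f, 5) << 22;
    assert(packed == 0x801e0380);

    /* "Bitfield" behaviour (e.g. AMD, WARP): the integer clear values are
     * simply masked and shifted into the components. This is where the
     * removed 0x00c01001 expectation for {1, 2, 3, 4} came from. */
    packed = (1u & 0x7ff) | (2u & 0x7ff) << 11 | (3u & 0x3ff) << 22;
    assert(packed == 0x00c01001);

    return 0;
}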