diff --git a/tests/d3d12.c b/tests/d3d12.c
index 98b69ba7..e164dab6 100644
--- a/tests/d3d12.c
+++ b/tests/d3d12.c
@@ -14180,7 +14180,7 @@ static void test_sample_instructions(void)
         float ps_constants[4];
         const unsigned int *expected_data;
         bool bug_on_mvk;
-        bool bug_on_llvmpipe;
+        bool todo_on_llvmpipe;
     }
     tests[] =
     {
@@ -14210,6 +14210,13 @@ static void test_sample_instructions(void)
         {&ps_sample_l, &rgba_texture, POINT, 0.0f, 0.0f, MIP_MAX, {-1.0f}, rgba_level_0},
         {&ps_sample_l, &rgba_texture, POINT, 0.0f, 0.0f, MIP_MAX, {0.0f}, rgba_level_0},
         {&ps_sample_l, &rgba_texture, POINT, 0.0f, 0.0f, MIP_MAX, {0.4f}, rgba_level_0},
+        /* For POINT filtering, Direct3D specifies mip level selection
+         * essentially as "⌊λ + ½⌋" (7.18.10 "Mipmap Selection" in the
+         * Direct3D 11.3 Functional Specification). OpenGL and Vulkan allow
+         * both "⌈λ + ½⌉ - 1" and "⌊λ + ½⌋", with the former being preferred.
+         * Perhaps unsurprisingly, most desktop GPUs implement the Direct3D
+         * behaviour. Current versions of llvmpipe seem to implement the
+         * preferred OpenGL/Vulkan behaviour. */
         {&ps_sample_l, &rgba_texture, POINT, 0.0f, 0.0f, MIP_MAX, {0.5f}, level_1_colors, true, true},
         {&ps_sample_l, &rgba_texture, POINT, 0.0f, 0.0f, MIP_MAX, {0.6f}, level_1_colors},
         {&ps_sample_l, &rgba_texture, POINT, 0.0f, 0.0f, MIP_MAX, {1.0f}, level_1_colors},
@@ -14360,7 +14367,7 @@ static void test_sample_instructions(void)
                 break;
         }
         bug_if(tests[i].bug_on_mvk && is_mvk_device(device))
-        bug_if(tests[i].bug_on_llvmpipe && is_llvmpipe_device(device))
+        todo_if(tests[i].todo_on_llvmpipe && is_llvmpipe_device(device))
         ok(!fail, "Got color 0x%08x, expected 0x%08x at (%u, %u).\n",
                 color, tests[i].expected_data[tests[i].texture->width * y + x], x, y);
         release_resource_readback(&rb);
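
Not part of the patch, but for reference: a minimal standalone sketch contrasting the two mip level selection rules the new comment describes. The helper names (`mip_level_d3d()`, `mip_level_gl_preferred()`) are invented for illustration and do not exist in the test suite, and clamping to the resource's actual mip range is omitted.

```c
#include <math.h>
#include <stdio.h>

/* Direct3D rule for POINT mip filtering: level = ⌊λ + ½⌋. */
static int mip_level_d3d(float lambda)
{
    return (int)floorf(lambda + 0.5f);
}

/* Preferred OpenGL/Vulkan rule: level = ⌈λ + ½⌉ - 1. */
static int mip_level_gl_preferred(float lambda)
{
    return (int)ceilf(lambda + 0.5f) - 1;
}

int main(void)
{
    /* The λ values used by the affected test cases. */
    static const float lambdas[] = {-1.0f, 0.0f, 0.4f, 0.5f, 0.6f, 1.0f};
    unsigned int i;

    for (i = 0; i < sizeof(lambdas) / sizeof(*lambdas); ++i)
    {
        printf("lambda = %+.1f: D3D -> %d, preferred GL/Vulkan -> %d\n",
                lambdas[i], mip_level_d3d(lambdas[i]),
                mip_level_gl_preferred(lambdas[i]));
    }
    return 0;
}
```

The two rules disagree only when λ + ½ is an integer n, since ⌊n⌋ = n while ⌈n⌉ - 1 = n - 1. Of the λ values above, only 0.5 hits that boundary (the Direct3D rule selects level 1, the preferred OpenGL/Vulkan rule selects level 0), which is why only the {0.5f} row gains todo_on_llvmpipe. For -1.0f both rules yield -1, which implementations clamp to level 0, so the results still agree there.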