ddraw/tests: Make test_depth_readback work on Nvidia GPUs on Windows.
Signed-off-by: Stefan Dösinger <stefan@codeweavers.com>
Signed-off-by: Henri Verbeet <hverbeet@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
parent 23800d67e0
commit d877dce7a2
dlls/ddraw/tests/ddraw1.c:

@@ -10840,6 +10840,18 @@ static void test_depth_readback(void)
     ok(!!window, "Failed to create a window.\n");
     ddraw = create_ddraw();
     ok(!!ddraw, "Failed to create a ddraw object.\n");
+    if (ddraw_is_nvidia(ddraw))
+    {
+        /* ddraw1 only has access to D16 Z buffers (and D24 ones, which are even more
+         * broken on Nvidia), so don't even attempt to run this test on Nvidia cards
+         * because some of them have broken D16 readback. See the ddraw7 version of
+         * this test for a more detailed comment. */
+        skip("Some Nvidia GPUs have broken D16 readback, skipping.\n");
+        IDirectDraw_Release(ddraw);
+        DestroyWindow(window);
+        return;
+    }
+
     if (!(device = create_device(ddraw, window, DDSCL_NORMAL)))
     {
         skip("Failed to create a D3D device, skipping tests.\n");
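The hunks above and below rely on the test suite's ddraw_is_nvidia() helper to detect the GPU vendor. As an illustration only, a vendor check along those lines can be built on IDirectDraw7::GetDeviceIdentifier and its dwVendorId field; the helper name and plumbing in this sketch are assumptions, not the actual implementation used by the Wine tests.

#include <windows.h>
#include <string.h>
#include <ddraw.h>

/* Illustrative sketch only (not Wine's actual helper): identify an Nvidia
 * adapter by comparing the driver's PCI vendor ID against 0x10de. */
static BOOL is_nvidia_adapter(IDirectDraw7 *ddraw7)
{
    DDDEVICEIDENTIFIER2 identifier;
    HRESULT hr;

    memset(&identifier, 0, sizeof(identifier));
    hr = IDirectDraw7_GetDeviceIdentifier(ddraw7, &identifier, 0);
    return SUCCEEDED(hr) && identifier.dwVendorId == 0x10de;
}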
dlls/ddraw/tests/ddraw2.c:

@@ -12211,7 +12211,7 @@ static void test_set_render_state(void)
 
 static void test_depth_readback(void)
 {
-    DWORD depth, expected_depth, max_diff;
+    DWORD depth, expected_depth, max_diff, passed_fmts = 0;
     IDirect3DMaterial2 *blue_background;
     IDirectDrawSurface *rt, *ds;
     IDirect3DViewport2 *viewport;
@@ -12223,6 +12223,7 @@ static void test_depth_readback(void)
     HWND window;
     HRESULT hr;
     void *ptr;
+    BOOL all_pass;
 
     static D3DRECT clear_rect = {{0}, {0}, {640}, {480}};
     static D3DLVERTEX quad[] =
@@ -12307,6 +12308,7 @@ static void test_depth_readback(void)
         hr = IDirectDrawSurface_Lock(ds, NULL, &surface_desc, DDLOCK_READONLY | DDLOCK_WAIT, NULL);
         ok(SUCCEEDED(hr), "Failed to lock surface, hr %#x.\n", hr);
 
+        all_pass = TRUE;
         for (y = 60; y < 480; y += 120)
         {
             for (x = 80; x < 640; x += 160)
@@ -12317,20 +12319,30 @@ static void test_depth_readback(void)
                 depth = *((DWORD *)ptr) & tests[i].z_mask;
                 expected_depth = (x * (0.9 / 640.0) + y * (0.1 / 480.0)) * tests[i].z_mask;
                 max_diff = ((0.5f * 0.9f) / 640.0f) * tests[i].z_mask;
-                ok(abs(expected_depth - depth) <= max_diff,
+                /* The ddraw2 version of this test behaves similarly to the ddraw7 version on Nvidia GPUs,
+                 * except that we only have D16 (broken on geforce 9) and D24X8 (broken on geforce 7) available.
+                 * Accept all nvidia GPUs as broken here, but still expect one of the formats to pass. */
+                ok(abs(expected_depth - depth) <= max_diff || ddraw_is_nvidia(ddraw),
                         "Test %u: Got depth 0x%08x (diff %d), expected 0x%08x+/-%u, at %u, %u.\n",
                         i, depth, expected_depth - depth, expected_depth, max_diff, x, y);
+                if (abs(expected_depth - depth) > max_diff)
+                    all_pass = FALSE;
             }
         }
 
         hr = IDirectDrawSurface_Unlock(ds, NULL);
         ok(SUCCEEDED(hr), "Failed to unlock surface, hr %#x.\n", hr);
 
+        if (all_pass)
+            passed_fmts++;
+
         hr = IDirectDrawSurface_DeleteAttachedSurface(rt, 0, ds);
         ok(SUCCEEDED(hr), "Failed to detach depth buffer, hr %#x.\n", hr);
         IDirectDrawSurface_Release(ds);
     }
 
+    ok(passed_fmts, "Not a single format passed the tests, this is bad even by Nvidia's standards.\n");
+
     destroy_viewport(device, viewport);
     destroy_material(blue_background);
     IDirectDrawSurface_Release(rt);
dlls/ddraw/tests/ddraw4.c:

@@ -14313,7 +14313,7 @@ static void test_map_synchronisation(void)
 
 static void test_depth_readback(void)
 {
-    DWORD depth, expected_depth, max_diff;
+    DWORD depth, expected_depth, max_diff, passed_fmts = 0;
     IDirectDrawSurface4 *rt, *ds;
     IDirect3DViewport3 *viewport;
     DDSURFACEDESC2 surface_desc;
@@ -14325,6 +14325,7 @@ static void test_depth_readback(void)
     HWND window;
     HRESULT hr;
     RECT r;
+    BOOL all_pass;
 
     static D3DRECT clear_rect = {{0}, {0}, {640}, {480}};
     static struct
@@ -14421,6 +14422,7 @@ static void test_depth_readback(void)
         hr = IDirect3DDevice3_EndScene(device);
         ok(SUCCEEDED(hr), "Failed to end scene, hr %#x.\n", hr);
 
+        all_pass = TRUE;
         for (y = 60; y < 480; y += 120)
         {
             for (x = 80; x < 640; x += 160)
@@ -14434,21 +14436,32 @@ static void test_depth_readback(void)
                 depth = *((DWORD *)surface_desc.lpSurface) & tests[i].z_mask;
                 expected_depth = (x * (0.9 / 640.0) + y * (0.1 / 480.0)) * tests[i].z_mask;
                 max_diff = ((0.5f * 0.9f) / 640.0f) * tests[i].z_mask;
-                todo_wine_if(tests[i].todo)
-                    ok(abs(expected_depth - depth) <= max_diff,
-                            "Test %u: Got depth 0x%08x (diff %d), expected 0x%08x+/-%u, at %u, %u.\n",
-                            i, depth, expected_depth - depth, expected_depth, max_diff, x, y);
+                /* The ddraw4 version of this test behaves similarly to the ddraw7 version on Nvidia GPUs,
+                 * except that Geforce 7 also returns garbage data in D24S8, whereas the ddraw7 version
+                 * returns 0 for that format. Give up on pre-filtering formats, accept Nvidia as generally
+                 * broken here, but still expect at least one format (D16 or D24X8 in practise) to pass. */
+                ok(abs(expected_depth - depth) <= max_diff || ddraw_is_nvidia(ddraw),
+                        "Test %u: Got depth 0x%08x (diff %d), expected 0x%08x+/-%u, at %u, %u.\n",
+                        i, depth, expected_depth - depth, expected_depth, max_diff, x, y);
+                if (abs(expected_depth - depth) > max_diff)
+                    all_pass = FALSE;
 
                 hr = IDirectDrawSurface4_Unlock(ds, &r);
                 ok(SUCCEEDED(hr), "Failed to unlock surface, hr %#x.\n", hr);
             }
         }
 
+        if (all_pass)
+            passed_fmts++;
+
         hr = IDirectDrawSurface4_DeleteAttachedSurface(rt, 0, ds);
         ok(SUCCEEDED(hr), "Failed to detach depth buffer, hr %#x.\n", hr);
         IDirectDrawSurface4_Release(ds);
     }
 
+    ok(passed_fmts, "Not a single format passed the tests, this is bad even by Nvidia's standards.\n");
+
     destroy_viewport(device, viewport);
     IDirectDrawSurface4_Release(rt);
     IDirectDraw4_Release(ddraw);
dlls/ddraw/tests/ddraw7.c:

@@ -13689,7 +13689,7 @@ done:
 
 static void test_depth_readback(void)
 {
-    DWORD depth, expected_depth, max_diff;
+    DWORD depth, expected_depth, max_diff, raw_value, passed_fmts = 0;
     IDirectDrawSurface7 *rt, *ds;
     DDSURFACEDESC2 surface_desc;
     IDirect3DDevice7 *device;
@@ -13700,6 +13700,7 @@ static void test_depth_readback(void)
     HWND window;
     HRESULT hr;
     RECT r;
+    BOOL all_zero, all_one, all_pass;
 
     static struct
     {
@@ -13791,6 +13792,7 @@ static void test_depth_readback(void)
         hr = IDirect3DDevice7_EndScene(device);
         ok(SUCCEEDED(hr), "Failed to end scene, hr %#x.\n", hr);
 
+        all_zero = all_one = all_pass = TRUE;
         for (y = 60; y < 480; y += 120)
         {
             for (x = 80; x < 640; x += 160)
@@ -13801,24 +13803,49 @@ static void test_depth_readback(void)
                 hr = IDirectDrawSurface7_Lock(ds, &r, &surface_desc, DDLOCK_READONLY, NULL);
                 ok(SUCCEEDED(hr), "Failed to lock surface, hr %#x.\n", hr);
 
-                depth = *((DWORD *)surface_desc.lpSurface) & tests[i].z_mask;
+                raw_value = *((DWORD *)surface_desc.lpSurface);
+                if (raw_value)
+                    all_zero = FALSE;
+                if (raw_value != 0x00ffffff)
+                    all_one = FALSE;
+
+                depth = raw_value & tests[i].z_mask;
                 expected_depth = (x * (0.9 / 640.0) + y * (0.1 / 480.0)) * tests[i].z_mask;
                 max_diff = ((0.5f * 0.9f) / 640.0f) * tests[i].z_mask;
-                todo_wine_if(tests[i].todo)
-                    ok(abs(expected_depth - depth) <= max_diff,
-                            "Test %u: Got depth 0x%08x (diff %d), expected 0x%08x+/-%u, at %u, %u.\n",
-                            i, depth, expected_depth - depth, expected_depth, max_diff, x, y);
+                /* This test is very reliable on AMD, but fails in a number of interesting ways on Nvidia GPUs:
+                 *
+                 * Geforce 7 GPUs work only with D16. D24 and D24S8 return 0, D24X8 broken data.
+                 *
+                 * Geforce 9 GPUs return broken data for D16 that resembles the expected data in
+                 * the lower 8 bits and has 0xff in the upper 8 bits. D24X8 works, D24 and D24S8 return
+                 * 0x00ffffff.
+                 *
+                 * Geforce GTX 650 has working D16 and D24, but D24S8 returns 0.
+                 *
+                 * Arx Fatalis is broken on the Geforce 9 in the same way it was broken in Wine (bug 43654).
+                 * The !tests[i].s_depth is supposed to rule out D16 on GF9 and D24X8 on GF7. */
+                ok(abs(expected_depth - depth) <= max_diff
+                        || (ddraw_is_nvidia(ddraw) && (all_zero || all_one || !tests[i].s_depth)),
+                        "Test %u: Got depth 0x%08x (diff %d), expected 0x%08x+/-%u, at %u, %u.\n",
+                        i, depth, expected_depth - depth, expected_depth, max_diff, x, y);
+                if (abs(expected_depth - depth) > max_diff)
+                    all_pass = FALSE;
 
                 hr = IDirectDrawSurface7_Unlock(ds, &r);
                 ok(SUCCEEDED(hr), "Failed to unlock surface, hr %#x.\n", hr);
             }
         }
+
+        if (all_pass)
+            passed_fmts++;
 
         hr = IDirectDrawSurface7_DeleteAttachedSurface(rt, 0, ds);
         ok(SUCCEEDED(hr), "Failed to detach depth buffer, hr %#x.\n", hr);
         IDirectDrawSurface7_Release(ds);
     }
 
+    ok(passed_fmts, "Not a single format passed the tests, this is bad even by Nvidia's standards.\n");
+
     IDirectDrawSurface7_Release(rt);
     IDirectDraw7_Release(ddraw);
     IDirect3D7_Release(d3d);
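For reference, all four versions of the test share the same tolerance check: the quad is drawn with depth rising 0.9 across the 640-pixel width and 0.1 across the 480-pixel height, the expected value is that gradient scaled by the format's z_mask, and half of one horizontal step is allowed as error. A format only counts toward passed_fmts if every sampled point stays within that tolerance. Below is a minimal stand-alone sketch of the arithmetic for one sample point; the concrete values (the first sampled pixel and a D16 mask) are chosen here purely for illustration.

#include <stdio.h>

int main(void)
{
    /* First sampled pixel of the 640x480 render target and the D16 depth mask. */
    unsigned int x = 80, y = 60, z_mask = 0xffff;

    /* Depth rises 0.9 over the width and 0.1 over the height, scaled to the format. */
    unsigned int expected_depth = (x * (0.9 / 640.0) + y * (0.1 / 480.0)) * z_mask;
    /* Allow half of one horizontal step of error. */
    unsigned int max_diff = ((0.5f * 0.9f) / 640.0f) * z_mask;

    printf("expected 0x%08x +/- %u\n", expected_depth, max_diff); /* 0x00001fff +/- 46 */
    return 0;
}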