wined3d: Allow up to 128 MiB of persistently mapped memory on 32-bit architectures.

Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
Signed-off-by: Henri Verbeet <hverbeet@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
This commit is contained in:
Zebediah Figura 2022-02-11 19:42:01 -06:00 committed by Alexandre Julliard
parent 899ae42b20
commit eb90d5db4f
5 changed files with 87 additions and 36 deletions

View File

@@ -381,7 +381,10 @@ static void wined3d_allocator_vk_destroy_chunk(struct wined3d_allocator_chunk *c
vk_info = &device_vk->vk_info;
if (chunk_vk->c.map_ptr)
{
VK_CALL(vkUnmapMemory(device_vk->vk_device, chunk_vk->vk_memory));
adapter_adjust_mapped_memory(device_vk->d.adapter, -WINED3D_ALLOCATOR_CHUNK_SIZE);
}
VK_CALL(vkFreeMemory(device_vk->vk_device, chunk_vk->vk_memory, NULL));
TRACE("Freed memory 0x%s.\n", wine_dbgstr_longlong(chunk_vk->vk_memory));
wined3d_allocator_chunk_cleanup(&chunk_vk->c);
@@ -801,10 +804,15 @@ static void *wined3d_bo_vk_map(struct wined3d_bo_vk *bo, struct wined3d_context_
return NULL;
}
}
else if ((vr = VK_CALL(vkMapMemory(device_vk->vk_device, bo->vk_memory, 0, VK_WHOLE_SIZE, 0, &bo->b.map_ptr))) < 0)
else
{
ERR("Failed to map memory, vr %s.\n", wined3d_debug_vkresult(vr));
return NULL;
if ((vr = VK_CALL(vkMapMemory(device_vk->vk_device, bo->vk_memory, 0, VK_WHOLE_SIZE, 0, &bo->b.map_ptr))) < 0)
{
ERR("Failed to map memory, vr %s.\n", wined3d_debug_vkresult(vr));
return NULL;
}
adapter_adjust_mapped_memory(device_vk->d.adapter, bo->size);
}
return bo->b.map_ptr;
@@ -812,12 +820,16 @@ static void *wined3d_bo_vk_map(struct wined3d_bo_vk *bo, struct wined3d_context_
static void wined3d_bo_vk_unmap(struct wined3d_bo_vk *bo, struct wined3d_context_vk *context_vk)
{
struct wined3d_device_vk *device_vk = wined3d_device_vk(context_vk->c.device);
const struct wined3d_vk_info *vk_info;
struct wined3d_device_vk *device_vk;
struct wined3d_bo_slab_vk *slab;
if (wined3d_map_persistent())
/* This may race with the client thread, but it's not a hard limit anyway. */
if (device_vk->d.adapter->mapped_size <= MAX_PERSISTENT_MAPPED_BYTES)
{
TRACE("Not unmapping BO %p.\n", bo);
return;
}
bo->b.map_ptr = NULL;
@@ -834,8 +846,8 @@ static void wined3d_bo_vk_unmap(struct wined3d_bo_vk *bo, struct wined3d_context
}
vk_info = context_vk->vk_info;
device_vk = wined3d_device_vk(context_vk->c.device);
VK_CALL(vkUnmapMemory(device_vk->vk_device, bo->vk_memory));
adapter_adjust_mapped_memory(device_vk->d.adapter, -bo->size);
}
static void wined3d_bo_slab_vk_lock(struct wined3d_bo_slab_vk *slab_vk, struct wined3d_context_vk *context_vk)

View File

@@ -2814,6 +2814,8 @@ static void *wined3d_allocator_chunk_gl_map(struct wined3d_allocator_chunk_gl *c
ERR("Failed to map chunk memory.\n");
return NULL;
}
adapter_adjust_mapped_memory(context_gl->c.device->adapter, WINED3D_ALLOCATOR_CHUNK_SIZE);
}
++chunk_gl->c.map_count;
@@ -2828,12 +2830,14 @@ static void wined3d_allocator_chunk_gl_unmap(struct wined3d_allocator_chunk_gl *
TRACE("chunk_gl %p, context_gl %p.\n", chunk_gl, context_gl);
if (!--chunk_gl->c.map_count && !wined3d_map_persistent())
{
wined3d_context_gl_bind_bo(context_gl, GL_PIXEL_UNPACK_BUFFER, chunk_gl->gl_buffer);
GL_EXTCALL(glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER));
chunk_gl->c.map_ptr = NULL;
}
if (--chunk_gl->c.map_count)
return;
wined3d_context_gl_bind_bo(context_gl, GL_PIXEL_UNPACK_BUFFER, chunk_gl->gl_buffer);
GL_EXTCALL(glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER));
chunk_gl->c.map_ptr = NULL;
adapter_adjust_mapped_memory(context_gl->c.device->adapter, -WINED3D_ALLOCATOR_CHUNK_SIZE);
}
static void *wined3d_bo_gl_map(struct wined3d_bo_gl *bo, struct wined3d_context_gl *context_gl, uint32_t flags)
@@ -2902,18 +2906,11 @@ map:
* resources are mapped. On the other hand, we don't want to use the
* access flags used to create the bo for non-persistent maps, because
* that may imply dropping GL_MAP_UNSYNCHRONIZED_BIT. */
if (wined3d_map_persistent())
{
gl_flags = bo->flags & ~GL_CLIENT_STORAGE_BIT;
if (!(gl_flags & GL_MAP_READ_BIT))
gl_flags |= GL_MAP_UNSYNCHRONIZED_BIT;
if (gl_flags & GL_MAP_WRITE_BIT)
gl_flags |= GL_MAP_FLUSH_EXPLICIT_BIT;
}
else
{
gl_flags = wined3d_resource_gl_map_flags(bo, flags);
}
gl_flags = bo->flags & ~GL_CLIENT_STORAGE_BIT;
if (!(gl_flags & GL_MAP_READ_BIT))
gl_flags |= GL_MAP_UNSYNCHRONIZED_BIT;
if (gl_flags & GL_MAP_WRITE_BIT)
gl_flags |= GL_MAP_FLUSH_EXPLICIT_BIT;
gl_flags |= GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;
bo->b.map_ptr = GL_EXTCALL(glMapBufferRange(bo->binding, 0, bo->size, gl_flags));
@@ -2927,6 +2924,9 @@ map:
bo->b.map_ptr = GL_EXTCALL(glMapBuffer(bo->binding, wined3d_resource_gl_legacy_map_flags(flags)));
}
if (bo->b.map_ptr)
adapter_adjust_mapped_memory(device_gl->d.adapter, bo->size);
wined3d_context_gl_bind_bo(context_gl, bo->binding, 0);
checkGLcall("Map buffer object");
@@ -2937,20 +2937,30 @@ static void wined3d_bo_gl_unmap(struct wined3d_bo_gl *bo, struct wined3d_context
{
const struct wined3d_gl_info *gl_info = context_gl->gl_info;
if (wined3d_map_persistent())
if (context_gl->c.device->adapter->mapped_size <= MAX_PERSISTENT_MAPPED_BYTES)
{
TRACE("Not unmapping BO %p.\n", bo);
return;
}
if (bo->memory)
{
struct wined3d_allocator_chunk_gl *chunk_gl = wined3d_allocator_chunk_gl(bo->memory->chunk);
wined3d_allocator_chunk_gl_unmap(chunk_gl, context_gl);
if (!chunk_gl->c.map_ptr)
bo->b.map_ptr = NULL;
return;
}
bo->b.map_ptr = NULL;
wined3d_context_gl_bind_bo(context_gl, bo->binding, bo->id);
if (bo->memory)
wined3d_allocator_chunk_gl_unmap(wined3d_allocator_chunk_gl(bo->memory->chunk), context_gl);
else
GL_EXTCALL(glUnmapBuffer(bo->binding));
GL_EXTCALL(glUnmapBuffer(bo->binding));
wined3d_context_gl_bind_bo(context_gl, bo->binding, 0);
checkGLcall("Unmap buffer object");
adapter_adjust_mapped_memory(context_gl->c.device->adapter, -bo->size);
}
void *wined3d_context_gl_map_bo_address(struct wined3d_context_gl *context_gl,
@@ -3115,6 +3125,7 @@ void wined3d_context_gl_destroy_bo(struct wined3d_context_gl *context_gl, struct
{
wined3d_context_gl_bind_bo(context_gl, bo->binding, bo->id);
GL_EXTCALL(glUnmapBuffer(bo->binding));
adapter_adjust_mapped_memory(context_gl->c.device->adapter, -bo->size);
}
TRACE("Destroying GL buffer %u.\n", bo->id);

View File

@@ -269,12 +269,17 @@ void *wined3d_allocator_chunk_vk_map(struct wined3d_allocator_chunk_vk *chunk_vk
wined3d_allocator_chunk_vk_lock(chunk_vk);
if (!chunk_vk->c.map_ptr && (vr = VK_CALL(vkMapMemory(device_vk->vk_device,
chunk_vk->vk_memory, 0, VK_WHOLE_SIZE, 0, &chunk_vk->c.map_ptr))) < 0)
if (!chunk_vk->c.map_ptr)
{
ERR("Failed to map chunk memory, vr %s.\n", wined3d_debug_vkresult(vr));
wined3d_allocator_chunk_vk_unlock(chunk_vk);
return NULL;
if ((vr = VK_CALL(vkMapMemory(device_vk->vk_device,
chunk_vk->vk_memory, 0, VK_WHOLE_SIZE, 0, &chunk_vk->c.map_ptr))) < 0)
{
ERR("Failed to map chunk memory, vr %s.\n", wined3d_debug_vkresult(vr));
wined3d_allocator_chunk_vk_unlock(chunk_vk);
return NULL;
}
adapter_adjust_mapped_memory(device_vk->d.adapter, WINED3D_ALLOCATOR_CHUNK_SIZE);
}
++chunk_vk->c.map_count;
@@ -305,6 +310,8 @@ void wined3d_allocator_chunk_vk_unmap(struct wined3d_allocator_chunk_vk *chunk_v
chunk_vk->c.map_ptr = NULL;
wined3d_allocator_chunk_vk_unlock(chunk_vk);
adapter_adjust_mapped_memory(device_vk->d.adapter, -WINED3D_ALLOCATOR_CHUNK_SIZE);
}
VkDeviceMemory wined3d_context_vk_allocate_vram_chunk_memory(struct wined3d_context_vk *context_vk,
@@ -1012,7 +1019,10 @@ void wined3d_context_vk_destroy_bo(struct wined3d_context_vk *context_vk, const
}
if (bo->b.map_ptr)
{
VK_CALL(vkUnmapMemory(device_vk->vk_device, bo->vk_memory));
adapter_adjust_mapped_memory(device_vk->d.adapter, -bo->size);
}
wined3d_context_vk_destroy_vk_memory(context_vk, bo->vk_memory, bo->command_buffer_id);
}

View File

@@ -157,6 +157,15 @@ UINT64 adapter_adjust_memory(struct wined3d_adapter *adapter, INT64 amount)
return adapter->vram_bytes_used;
}
/* Atomically adjust the adapter's running total of persistently mapped
 * memory by "size" bytes (negative values release) and return the new
 * total. This needs to be thread-safe; the Vulkan adapter may map from
 * client threads. */
ssize_t adapter_adjust_mapped_memory(struct wined3d_adapter *adapter, ssize_t size)
{
    ssize_t total;

    /* InterlockedExchangeAddSizeT() returns the previous value, so add
     * "size" once more to report the updated total. */
    total = InterlockedExchangeAddSizeT(&adapter->mapped_size, size) + size;
    TRACE("Adjusted mapped adapter memory by %zd to %zd.\n", size, total);

    return total;
}
void wined3d_adapter_cleanup(struct wined3d_adapter *adapter)
{
unsigned int output_idx;

View File

@@ -3470,6 +3470,12 @@ struct wined3d_output
HRESULT wined3d_output_get_gamma_ramp(struct wined3d_output *output, struct wined3d_gamma_ramp *ramp) DECLSPEC_HIDDEN;
#ifdef _WIN64
#define MAX_PERSISTENT_MAPPED_BYTES SSIZE_MAX
#else
#define MAX_PERSISTENT_MAPPED_BYTES (128 * 1024 * 1024)
#endif
/* The adapter structure */
struct wined3d_adapter
{
@@ -3488,6 +3494,8 @@ struct wined3d_adapter
void *formats;
size_t format_size;
ssize_t mapped_size;
const struct wined3d_vertex_pipe_ops *vertex_pipe;
const struct wined3d_fragment_pipe_ops *fragment_pipe;
const struct wined3d_state_entry_template *misc_state_template;
@@ -3567,6 +3575,7 @@ BOOL wined3d_adapter_gl_init_format_info(struct wined3d_adapter *adapter,
BOOL wined3d_adapter_no3d_init_format_info(struct wined3d_adapter *adapter) DECLSPEC_HIDDEN;
BOOL wined3d_adapter_vk_init_format_info(struct wined3d_adapter_vk *adapter_vk,
const struct wined3d_vk_info *vk_info) DECLSPEC_HIDDEN;
ssize_t adapter_adjust_mapped_memory(struct wined3d_adapter *adapter, ssize_t size) DECLSPEC_HIDDEN;
UINT64 adapter_adjust_memory(struct wined3d_adapter *adapter, INT64 amount) DECLSPEC_HIDDEN;
BOOL wined3d_caps_gl_ctx_test_viewport_subpixel_bits(struct wined3d_caps_gl_ctx *ctx) DECLSPEC_HIDDEN;