wined3d: Protect wined3d_chunk_vk map fields with a mutex.

So as to allow chunks to be mapped from the client thread.

Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
Signed-off-by: Henri Verbeet <hverbeet@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
Zebediah Figura, 2021-11-04 22:34:29 -05:00, committed by Alexandre Julliard
parent 6f292cf37b
commit a5efc1d5e0
3 changed files with 30 additions and 2 deletions
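
The map fields in question (map_ptr and map_count on the common chunk structure) are now only touched while holding the device's allocator critical section, which wined3d_device_vk_allocator_lock()/unlock() wrap around allocator_cs (see the header hunk at the end of this diff). A rough sketch of the device-side plumbing this relies on follows; the allocator_cs field and the lock helper are visible below, but the init/teardown placement and these function names are assumptions, not part of this commit:

static void device_vk_allocator_cs_init(struct wined3d_device_vk *device_vk)
{
    /* Assumed to run once during device creation, before any allocation. */
    InitializeCriticalSection(&device_vk->allocator_cs);
}

static void device_vk_allocator_cs_cleanup(struct wined3d_device_vk *device_vk)
{
    /* Assumed to run during device destruction, after the allocator is torn down. */
    DeleteCriticalSection(&device_vk->allocator_cs);
}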


@@ -380,7 +380,7 @@ static void wined3d_allocator_vk_destroy_chunk(struct wined3d_allocator_chunk *c

     TRACE("chunk %p.\n", chunk);

-    device_vk = CONTAINING_RECORD(chunk_vk->c.allocator, struct wined3d_device_vk, allocator);
+    device_vk = wined3d_device_vk_from_allocator(chunk_vk->c.allocator);
     vk_info = &device_vk->vk_info;

     if (chunk_vk->c.map_ptr)


@@ -247,16 +247,29 @@ static VkStencilOp vk_stencil_op_from_wined3d(enum wined3d_stencil_op op)
     }
 }

+static void wined3d_allocator_chunk_vk_lock(struct wined3d_allocator_chunk_vk *chunk_vk)
+{
+    wined3d_device_vk_allocator_lock(wined3d_device_vk_from_allocator(chunk_vk->c.allocator));
+}
+
+static void wined3d_allocator_chunk_vk_unlock(struct wined3d_allocator_chunk_vk *chunk_vk)
+{
+    wined3d_device_vk_allocator_unlock(wined3d_device_vk_from_allocator(chunk_vk->c.allocator));
+}
+
 void *wined3d_allocator_chunk_vk_map(struct wined3d_allocator_chunk_vk *chunk_vk,
         struct wined3d_context_vk *context_vk)
 {
     struct wined3d_device_vk *device_vk = wined3d_device_vk(context_vk->c.device);
     const struct wined3d_vk_info *vk_info = context_vk->vk_info;
+    void *map_ptr;
     VkResult vr;

     TRACE("chunk %p, memory 0x%s, map_ptr %p.\n", chunk_vk,
             wine_dbgstr_longlong(chunk_vk->vk_memory), chunk_vk->c.map_ptr);

+    wined3d_allocator_chunk_vk_lock(chunk_vk);
+
     if (!chunk_vk->c.map_ptr && (vr = VK_CALL(vkMapMemory(device_vk->vk_device,
             chunk_vk->vk_memory, 0, VK_WHOLE_SIZE, 0, &chunk_vk->c.map_ptr))) < 0)
     {
@@ -265,8 +278,11 @@ void *wined3d_allocator_chunk_vk_map(struct wined3d_allocator_chunk_vk *chunk_vk
     }

     ++chunk_vk->c.map_count;
+    map_ptr = chunk_vk->c.map_ptr;

-    return chunk_vk->c.map_ptr;
+    wined3d_allocator_chunk_vk_unlock(chunk_vk);
+
+    return map_ptr;
 }

 void wined3d_allocator_chunk_vk_unmap(struct wined3d_allocator_chunk_vk *chunk_vk,
@@ -277,11 +293,18 @@ void wined3d_allocator_chunk_vk_unmap(struct wined3d_allocator_chunk_v

     TRACE("chunk_vk %p, context_vk %p.\n", chunk_vk, context_vk);

+    wined3d_allocator_chunk_vk_lock(chunk_vk);
+
     if (--chunk_vk->c.map_count)
+    {
+        wined3d_allocator_chunk_vk_unlock(chunk_vk);
         return;
+    }

     VK_CALL(vkUnmapMemory(device_vk->vk_device, chunk_vk->vk_memory));
     chunk_vk->c.map_ptr = NULL;
+
+    wined3d_allocator_chunk_vk_unlock(chunk_vk);
 }

 VkDeviceMemory wined3d_context_vk_allocate_vram_chunk_memory(struct wined3d_context_vk *context_vk,
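
With map_ptr and map_count now only updated under the allocator lock, a chunk can be mapped from the client thread while another thread maps or unmaps the same chunk. A hypothetical caller might look like the sketch below; upload_through_chunk, offset, src and size are placeholder names, not part of this commit:

static BOOL upload_through_chunk(struct wined3d_allocator_chunk_vk *chunk_vk,
        struct wined3d_context_vk *context_vk, size_t offset, const void *src, size_t size)
{
    void *map_ptr;

    /* Maps the chunk (or bumps its map count) under the allocator lock. */
    if (!(map_ptr = wined3d_allocator_chunk_vk_map(chunk_vk, context_vk)))
        return FALSE;

    memcpy((char *)map_ptr + offset, src, size);

    /* Drops the map count again, unmapping when it reaches zero. */
    wined3d_allocator_chunk_vk_unmap(chunk_vk, context_vk);
    return TRUE;
}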


@@ -4101,6 +4101,11 @@ static inline struct wined3d_device_vk *wined3d_device_vk(struct wined3d_device
     return CONTAINING_RECORD(device, struct wined3d_device_vk, d);
 }

+static inline struct wined3d_device_vk *wined3d_device_vk_from_allocator(struct wined3d_allocator *allocator)
+{
+    return CONTAINING_RECORD(allocator, struct wined3d_device_vk, allocator);
+}
+
 static inline void wined3d_device_vk_allocator_lock(struct wined3d_device_vk *device_vk)
 {
     EnterCriticalSection(&device_vk->allocator_cs);
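
The excerpt cuts off after the lock helper; the matching unlock helper called by wined3d_allocator_chunk_vk_unlock() above presumably just releases the same critical section:

static inline void wined3d_device_vk_allocator_unlock(struct wined3d_device_vk *device_vk)
{
    LeaveCriticalSection(&device_vk->allocator_cs);
}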