wined3d: Use the chunk allocator for GL indirect draw buffers.

Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
Signed-off-by: Henri Verbeet <hverbeet@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
Henri Verbeet 2022-02-01 15:28:29 -06:00 committed by Alexandre Julliard
parent 766617d6f0
commit 7c844cd3c9
4 changed files with 390 additions and 45 deletions


@@ -4283,6 +4283,7 @@ static void adapter_gl_destroy_device(struct wined3d_device *device)
struct wined3d_device_gl *device_gl = wined3d_device_gl(device);

wined3d_device_cleanup(&device_gl->d);
heap_free(device_gl->retired_blocks);
heap_free(device_gl);
}


@@ -2618,6 +2618,37 @@ static void wined3d_context_gl_poll_fences(struct wined3d_context_gl *context_gl
}
}

static void wined3d_context_gl_cleanup_resources(struct wined3d_context_gl *context_gl)
{
struct wined3d_device_gl *device_gl = wined3d_device_gl(context_gl->c.device);
struct wined3d_retired_block_gl *r, *blocks;
SIZE_T count, i = 0;
uint64_t id;
wined3d_context_gl_poll_fences(context_gl);
id = device_gl->completed_fence_id;
blocks = device_gl->retired_blocks;
count = device_gl->retired_block_count;
while (i < count)
{
r = &blocks[i];
if (r->fence_id > id)
{
++i;
continue;
}
wined3d_allocator_block_free(r->block);
if (i != --count)
*r = blocks[count];
else
++i;
}
device_gl->retired_block_count = count;
}

void wined3d_context_gl_wait_command_fence(struct wined3d_context_gl *context_gl, uint64_t id)
{
struct wined3d_device_gl *device_gl = wined3d_device_gl(context_gl->c.device);
@@ -2635,7 +2666,7 @@ void wined3d_context_gl_wait_command_fence(struct wined3d_context_gl *context_gl
if ((ret = wined3d_fence_wait(context_gl->submitted.fences[i].fence, &device_gl->d)) != WINED3D_FENCE_OK)
ERR("Failed to wait for command fence with id 0x%s, ret %#x.\n", wine_dbgstr_longlong(id), ret);
-wined3d_context_gl_poll_fences(context_gl);
+wined3d_context_gl_cleanup_resources(context_gl);
return;
}
@@ -2665,7 +2696,133 @@ void wined3d_context_gl_submit_command_fence(struct wined3d_context_gl *context_
device_gl->completed_fence_id = 0;
device_gl->current_fence_id = 1;
}
-wined3d_context_gl_poll_fences(context_gl);
+wined3d_context_gl_cleanup_resources(context_gl);
}
static void wined3d_context_gl_destroy_allocator_block(struct wined3d_context_gl *context_gl,
struct wined3d_allocator_block *block, uint64_t fence_id)
{
struct wined3d_device_gl *device_gl = wined3d_device_gl(context_gl->c.device);
struct wined3d_retired_block_gl *r;
if (device_gl->completed_fence_id > fence_id)
{
wined3d_allocator_block_free(block);
TRACE("Freed block %p.\n", block);
return;
}
if (!wined3d_array_reserve((void **)&device_gl->retired_blocks,
&device_gl->retired_blocks_size, device_gl->retired_block_count + 1,
sizeof(*device_gl->retired_blocks)))
{
ERR("Leaking block %p.\n", block);
return;
}
r = &device_gl->retired_blocks[device_gl->retired_block_count++];
r->block = block;
r->fence_id = fence_id;
}
/* We always have buffer storage here. */
GLuint wined3d_context_gl_allocate_vram_chunk_buffer(struct wined3d_context_gl *context_gl,
unsigned int pool, size_t size)
{
const struct wined3d_gl_info *gl_info = context_gl->gl_info;
GLbitfield flags;
GLuint id = 0;
TRACE("context_gl %p, pool %u, size %zu.\n", context_gl, pool, size);
GL_EXTCALL(glGenBuffers(1, &id));
if (!id)
{
checkGLcall("buffer object creation");
return 0;
}
wined3d_context_gl_bind_bo(context_gl, GL_PIXEL_UNPACK_BUFFER, id);
flags = wined3d_device_gl_get_memory_type_flags(pool) | GL_DYNAMIC_STORAGE_BIT;
if (flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT))
flags |= GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;
GL_EXTCALL(glBufferStorage(GL_PIXEL_UNPACK_BUFFER, size, NULL, flags));
checkGLcall("buffer object creation");
TRACE("Created buffer object %u.\n", id);
return id;
}
static struct wined3d_allocator_block *wined3d_context_gl_allocate_memory(struct wined3d_context_gl *context_gl,
unsigned int memory_type, GLsizeiptr size, GLuint *id)
{
struct wined3d_device_gl *device_gl = wined3d_device_gl(context_gl->c.device);
struct wined3d_allocator *allocator = &device_gl->allocator;
struct wined3d_allocator_block *block;
if (size > WINED3D_ALLOCATOR_CHUNK_SIZE / 2)
{
*id = wined3d_context_gl_allocate_vram_chunk_buffer(context_gl, memory_type, size);
return NULL;
}
if (!(block = wined3d_allocator_allocate(allocator, &context_gl->c, memory_type, size)))
{
*id = 0;
return NULL;
}
*id = wined3d_allocator_chunk_gl(block->chunk)->gl_buffer;
return block;
}
static void *wined3d_allocator_chunk_gl_map(struct wined3d_allocator_chunk_gl *chunk_gl,
struct wined3d_context_gl *context_gl)
{
const struct wined3d_gl_info *gl_info = context_gl->gl_info;
TRACE("chunk %p, gl_buffer %u, map_ptr %p.\n", chunk_gl, chunk_gl->gl_buffer, chunk_gl->c.map_ptr);
if (!chunk_gl->c.map_ptr)
{
unsigned int flags = wined3d_device_gl_get_memory_type_flags(chunk_gl->memory_type) & ~GL_CLIENT_STORAGE_BIT;
flags |= GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;
if (!(flags & GL_MAP_READ_BIT))
flags |= GL_MAP_UNSYNCHRONIZED_BIT;
if (flags & GL_MAP_WRITE_BIT)
flags |= GL_MAP_FLUSH_EXPLICIT_BIT;
wined3d_context_gl_bind_bo(context_gl, GL_PIXEL_UNPACK_BUFFER, chunk_gl->gl_buffer);
chunk_gl->c.map_ptr = GL_EXTCALL(glMapBufferRange(GL_PIXEL_UNPACK_BUFFER,
0, WINED3D_ALLOCATOR_CHUNK_SIZE, flags));
if (!chunk_gl->c.map_ptr)
{
ERR("Failed to map chunk memory.\n");
return NULL;
}
}
++chunk_gl->c.map_count;
return chunk_gl->c.map_ptr;
}
static void wined3d_allocator_chunk_gl_unmap(struct wined3d_allocator_chunk_gl *chunk_gl,
struct wined3d_context_gl *context_gl)
{
const struct wined3d_gl_info *gl_info = context_gl->gl_info;
TRACE("chunk_gl %p, context_gl %p.\n", chunk_gl, context_gl);
if (!--chunk_gl->c.map_count && !wined3d_map_persistent())
{
wined3d_context_gl_bind_bo(context_gl, GL_PIXEL_UNPACK_BUFFER, chunk_gl->gl_buffer);
GL_EXTCALL(glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER));
chunk_gl->c.map_ptr = NULL;
}
}

static void *wined3d_bo_gl_map(struct wined3d_bo_gl *bo, struct wined3d_context_gl *context_gl, uint32_t flags)
@@ -2708,6 +2865,15 @@ map:
if (bo->b.map_ptr)
return bo->b.map_ptr;
if (bo->memory)
{
struct wined3d_allocator_chunk_gl *chunk_gl = wined3d_allocator_chunk_gl(bo->memory->chunk);
if (!(map_ptr = wined3d_allocator_chunk_gl_map(chunk_gl, context_gl)))
ERR("Failed to map chunk.\n");
return map_ptr;
}
gl_info = context_gl->gl_info;
wined3d_context_gl_bind_bo(context_gl, bo->binding, bo->id);
@@ -2766,7 +2932,12 @@ static void wined3d_bo_gl_unmap(struct wined3d_bo_gl *bo, struct wined3d_context
return;
wined3d_context_gl_bind_bo(context_gl, bo->binding, bo->id);
-GL_EXTCALL(glUnmapBuffer(bo->binding));
if (bo->memory)
wined3d_allocator_chunk_gl_unmap(wined3d_allocator_chunk_gl(bo->memory->chunk), context_gl);
else
GL_EXTCALL(glUnmapBuffer(bo->binding));
wined3d_context_gl_bind_bo(context_gl, bo->binding, 0);
checkGLcall("Unmap buffer object");
}
@@ -2920,7 +3091,23 @@ void wined3d_context_gl_destroy_bo(struct wined3d_context_gl *context_gl, struct
TRACE("context_gl %p, bo %p.\n", context_gl, bo);
if (bo->memory)
{
if (bo->b.map_ptr)
wined3d_allocator_chunk_gl_unmap(wined3d_allocator_chunk_gl(bo->memory->chunk), context_gl);
wined3d_context_gl_destroy_allocator_block(context_gl, bo->memory, bo->command_fence_id);
bo->id = 0;
return;
}
if (bo->b.map_ptr)
{
wined3d_context_gl_bind_bo(context_gl, bo->binding, bo->id);
GL_EXTCALL(glUnmapBuffer(bo->binding));
}
TRACE("Destroying GL buffer %u.\n", bo->id); TRACE("Destroying GL buffer %u.\n", bo->id);
GL_EXTCALL(glDeleteBuffers(1, &bo->id)); GL_EXTCALL(glDeleteBuffers(1, &bo->id));
checkGLcall("buffer object destruction"); checkGLcall("buffer object destruction");
bo->id = 0; bo->id = 0;
@@ -2929,42 +3116,61 @@ void wined3d_context_gl_destroy_bo(struct wined3d_context_gl *context_gl, struct
bool wined3d_context_gl_create_bo(struct wined3d_context_gl *context_gl, GLsizeiptr size,
        GLenum binding, GLenum usage, bool coherent, GLbitfield flags, struct wined3d_bo_gl *bo)
{
+   unsigned int memory_type_idx = wined3d_device_gl_find_memory_type(flags);
    const struct wined3d_gl_info *gl_info = context_gl->gl_info;
+   struct wined3d_allocator_block *memory = NULL;
+   GLsizeiptr buffer_offset = 0;
    GLuint id = 0;

    TRACE("context_gl %p, size %lu, binding %#x, usage %#x, coherent %#x, flags %#x, bo %p.\n",
            context_gl, size, binding, usage, coherent, flags, bo);

-   GL_EXTCALL(glGenBuffers(1, &id));
-   if (!id)
-   {
-       checkGLcall("buffer object creation");
-       return false;
-   }
-   wined3d_context_gl_bind_bo(context_gl, binding, id);
-
-   if (!coherent && gl_info->supported[APPLE_FLUSH_BUFFER_RANGE])
-   {
-       GL_EXTCALL(glBufferParameteriAPPLE(binding, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE));
-       GL_EXTCALL(glBufferParameteriAPPLE(binding, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE));
-   }
-
    if (gl_info->supported[ARB_BUFFER_STORAGE])
    {
-       if (flags & (GL_MAP_READ_BIT | GL_MAP_WRITE_BIT))
-           flags |= GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT;
-       GL_EXTCALL(glBufferStorage(binding, size, NULL, flags | GL_DYNAMIC_STORAGE_BIT));
+       switch (binding)
+       {
+           case GL_DRAW_INDIRECT_BUFFER:
+               if ((memory = wined3d_context_gl_allocate_memory(context_gl, memory_type_idx, size, &id)))
+                   buffer_offset = memory->offset;
+               break;
+
+           default:
+               WARN_(d3d_perf)("Not allocating chunk memory for binding type %#x.\n", binding);
+               id = wined3d_context_gl_allocate_vram_chunk_buffer(context_gl, memory_type_idx, size);
+               break;
+       }
+
+       if (!id)
+       {
+           WARN("Failed to allocate buffer.\n");
+           return false;
+       }
    }
    else
    {
-       GL_EXTCALL(glBufferData(binding, size, NULL, usage));
-   }
+       GL_EXTCALL(glGenBuffers(1, &id));
+       if (!id)
+       {
+           checkGLcall("buffer object creation");
+           return false;
+       }
+       wined3d_context_gl_bind_bo(context_gl, binding, id);

-   wined3d_context_gl_bind_bo(context_gl, binding, 0);
-   checkGLcall("buffer object creation");
+       if (!coherent && gl_info->supported[APPLE_FLUSH_BUFFER_RANGE])
+       {
+           GL_EXTCALL(glBufferParameteriAPPLE(binding, GL_BUFFER_FLUSHING_UNMAP_APPLE, GL_FALSE));
+           GL_EXTCALL(glBufferParameteriAPPLE(binding, GL_BUFFER_SERIALIZED_MODIFY_APPLE, GL_FALSE));
+       }
+
+       GL_EXTCALL(glBufferData(binding, size, NULL, usage));
+
+       wined3d_context_gl_bind_bo(context_gl, binding, 0);
+       checkGLcall("buffer object creation");
+   }

    TRACE("Created buffer object %u.\n", id);
    bo->id = id;
+   bo->memory = memory;
    bo->size = size;
    bo->binding = binding;
    bo->usage = usage;
@@ -2972,8 +3178,8 @@ bool wined3d_context_gl_create_bo(struct wined3d_context_gl *context_gl, GLsizei
bo->b.coherent = coherent;
list_init(&bo->b.users);
bo->command_fence_id = 0;
-bo->b.memory_offset = 0;
-bo->b.buffer_offset = 0;
+bo->b.buffer_offset = buffer_offset;
+bo->b.memory_offset = bo->b.buffer_offset;
bo->b.map_ptr = NULL;

return true;


@@ -955,6 +955,101 @@ static void device_init_swapchain_state(struct wined3d_device *device, struct wi
wined3d_device_context_set_depth_stencil_view(context, ds_enable ? device->auto_depth_stencil_view : NULL);
}
static struct wined3d_allocator_chunk *wined3d_allocator_gl_create_chunk(struct wined3d_allocator *allocator,
struct wined3d_context *context, unsigned int memory_type, size_t chunk_size)
{
struct wined3d_context_gl *context_gl = wined3d_context_gl(context);
struct wined3d_allocator_chunk_gl *chunk_gl;
TRACE("allocator %p, context %p, memory_type %u, chunk_size %zu.\n", allocator, context, memory_type, chunk_size);
if (!(chunk_gl = heap_alloc(sizeof(*chunk_gl))))
return NULL;
if (!wined3d_allocator_chunk_init(&chunk_gl->c, allocator))
{
heap_free(chunk_gl);
return NULL;
}
chunk_gl->memory_type = memory_type;
if (!(chunk_gl->gl_buffer = wined3d_context_gl_allocate_vram_chunk_buffer(context_gl, memory_type, chunk_size)))
{
wined3d_allocator_chunk_cleanup(&chunk_gl->c);
heap_free(chunk_gl);
return NULL;
}
list_add_head(&allocator->pools[memory_type].chunks, &chunk_gl->c.entry);
return &chunk_gl->c;
}
static void wined3d_allocator_gl_destroy_chunk(struct wined3d_allocator_chunk *chunk)
{
struct wined3d_allocator_chunk_gl *chunk_gl = wined3d_allocator_chunk_gl(chunk);
const struct wined3d_gl_info *gl_info;
struct wined3d_context_gl *context_gl;
struct wined3d_device_gl *device_gl;
TRACE("chunk %p.\n", chunk);
device_gl = CONTAINING_RECORD(chunk_gl->c.allocator, struct wined3d_device_gl, allocator);
context_gl = wined3d_context_gl(context_acquire(&device_gl->d, NULL, 0));
gl_info = context_gl->gl_info;
wined3d_context_gl_bind_bo(context_gl, GL_PIXEL_UNPACK_BUFFER, chunk_gl->gl_buffer);
if (chunk_gl->c.map_ptr)
GL_EXTCALL(glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER));
GL_EXTCALL(glDeleteBuffers(1, &chunk_gl->gl_buffer));
TRACE("Freed buffer %u.\n", chunk_gl->gl_buffer);
wined3d_allocator_chunk_cleanup(&chunk_gl->c);
heap_free(chunk_gl);
context_release(&context_gl->c);
}
static const struct wined3d_allocator_ops wined3d_allocator_gl_ops =
{
.allocator_create_chunk = wined3d_allocator_gl_create_chunk,
.allocator_destroy_chunk = wined3d_allocator_gl_destroy_chunk,
};
static const struct
{
GLbitfield flags;
}
gl_memory_types[] =
{
{0},
{GL_MAP_READ_BIT},
{GL_MAP_WRITE_BIT},
{GL_MAP_READ_BIT | GL_MAP_WRITE_BIT},
{GL_CLIENT_STORAGE_BIT},
{GL_CLIENT_STORAGE_BIT | GL_MAP_READ_BIT},
{GL_CLIENT_STORAGE_BIT | GL_MAP_WRITE_BIT},
{GL_CLIENT_STORAGE_BIT | GL_MAP_READ_BIT | GL_MAP_WRITE_BIT},
};
unsigned int wined3d_device_gl_find_memory_type(GLbitfield flags)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(gl_memory_types); ++i)
{
if (gl_memory_types[i].flags == flags)
return i;
}
assert(0);
return 0;
}
GLbitfield wined3d_device_gl_get_memory_type_flags(unsigned int memory_type_idx)
{
return gl_memory_types[memory_type_idx].flags;
}

void wined3d_device_gl_delete_opengl_contexts_cs(void *object)
{
struct wined3d_device_gl *device_gl = object;
@@ -978,6 +1073,12 @@ void wined3d_device_gl_delete_opengl_contexts_cs(void *object)
device->blitter->ops->blitter_destroy(device->blitter, context);
device->shader_backend->shader_free_private(device, context);
wined3d_device_gl_destroy_dummy_textures(device_gl, context_gl);
wined3d_context_gl_submit_command_fence(context_gl);
wined3d_context_gl_wait_command_fence(context_gl,
wined3d_device_gl(context_gl->c.device)->current_fence_id - 1);
wined3d_allocator_cleanup(&device_gl->allocator);
context_release(context);

while (device->context_count)
@@ -1010,10 +1111,18 @@ void wined3d_device_gl_create_primary_opengl_context_cs(void *object)
return;
}
if (!wined3d_allocator_init(&device_gl->allocator, ARRAY_SIZE(gl_memory_types), &wined3d_allocator_gl_ops))
{
WARN("Failed to initialise allocator.\n");
context_release(context);
return;
}
if (FAILED(hr = device->shader_backend->shader_alloc_private(device,
        device->adapter->vertex_pipe, device->adapter->fragment_pipe)))
{
ERR("Failed to allocate shader private data, hr %#x.\n", hr);
wined3d_allocator_cleanup(&device_gl->allocator);
context_release(context);
return;
}
@@ -1022,6 +1131,7 @@ void wined3d_device_gl_create_primary_opengl_context_cs(void *object)
{
ERR("Failed to create CPU blitter.\n");
device->shader_backend->shader_free_private(device, NULL);
wined3d_allocator_cleanup(&device_gl->allocator);
context_release(context);
return;
}


@@ -1617,6 +1617,9 @@ struct wined3d_bo_gl
struct wined3d_bo b;
GLuint id;
struct wined3d_allocator_block *memory;
GLsizeiptr size;
GLenum binding;
GLenum usage;
@@ -2355,6 +2358,8 @@ void wined3d_context_gl_alloc_so_statistics_query(struct wined3d_context_gl *con
struct wined3d_so_statistics_query *query) DECLSPEC_HIDDEN;
void wined3d_context_gl_alloc_timestamp_query(struct wined3d_context_gl *context_gl,
struct wined3d_timestamp_query *query) DECLSPEC_HIDDEN;
GLuint wined3d_context_gl_allocate_vram_chunk_buffer(struct wined3d_context_gl *context_gl,
unsigned int pool, size_t size) DECLSPEC_HIDDEN;
void wined3d_context_gl_apply_blit_state(struct wined3d_context_gl *context_gl,
        const struct wined3d_device *device) DECLSPEC_HIDDEN;
BOOL wined3d_context_gl_apply_clear_state(struct wined3d_context_gl *context_gl, const struct wined3d_state *state,
@@ -3956,25 +3961,6 @@ static inline struct wined3d_device_no3d *wined3d_device_no3d(struct wined3d_dev
return CONTAINING_RECORD(device, struct wined3d_device_no3d, d);
}
-struct wined3d_device_gl
-{
-    struct wined3d_device d;
-    /* Textures for when no other textures are bound. */
-    struct wined3d_dummy_textures dummy_textures;
-    uint64_t completed_fence_id;
-    uint64_t current_fence_id;
-};
-static inline struct wined3d_device_gl *wined3d_device_gl(struct wined3d_device *device)
-{
-    return CONTAINING_RECORD(device, struct wined3d_device_gl, d);
-}
-void wined3d_device_gl_create_primary_opengl_context_cs(void *object) DECLSPEC_HIDDEN;
-void wined3d_device_gl_delete_opengl_contexts_cs(void *object) DECLSPEC_HIDDEN;
struct wined3d_null_resources_vk
{
struct wined3d_bo_vk bo;
@@ -4021,6 +4007,18 @@ void wined3d_allocator_chunk_cleanup(struct wined3d_allocator_chunk *chunk) DECL
bool wined3d_allocator_chunk_init(struct wined3d_allocator_chunk *chunk,
        struct wined3d_allocator *allocator) DECLSPEC_HIDDEN;
struct wined3d_allocator_chunk_gl
{
struct wined3d_allocator_chunk c;
unsigned int memory_type;
GLuint gl_buffer;
};
static inline struct wined3d_allocator_chunk_gl *wined3d_allocator_chunk_gl(struct wined3d_allocator_chunk *chunk)
{
return CONTAINING_RECORD(chunk, struct wined3d_allocator_chunk_gl, c);
}
struct wined3d_allocator_chunk_vk
{
struct wined3d_allocator_chunk c;
@@ -4155,6 +4153,36 @@ void wined3d_device_vk_destroy_null_views(struct wined3d_device_vk *device_vk,
void wined3d_device_vk_uav_clear_state_init(struct wined3d_device_vk *device_vk) DECLSPEC_HIDDEN;
void wined3d_device_vk_uav_clear_state_cleanup(struct wined3d_device_vk *device_vk) DECLSPEC_HIDDEN;
struct wined3d_device_gl
{
struct wined3d_device d;
/* Textures for when no other textures are bound. */
struct wined3d_dummy_textures dummy_textures;
struct wined3d_allocator allocator;
uint64_t completed_fence_id;
uint64_t current_fence_id;
struct wined3d_retired_block_gl
{
struct wined3d_allocator_block *block;
uint64_t fence_id;
} *retired_blocks;
SIZE_T retired_blocks_size;
SIZE_T retired_block_count;
};
static inline struct wined3d_device_gl *wined3d_device_gl(struct wined3d_device *device)
{
return CONTAINING_RECORD(device, struct wined3d_device_gl, d);
}
void wined3d_device_gl_create_primary_opengl_context_cs(void *object) DECLSPEC_HIDDEN;
void wined3d_device_gl_delete_opengl_contexts_cs(void *object) DECLSPEC_HIDDEN;
unsigned int wined3d_device_gl_find_memory_type(GLbitfield flags) DECLSPEC_HIDDEN;
GLbitfield wined3d_device_gl_get_memory_type_flags(unsigned int memory_type_idx) DECLSPEC_HIDDEN;
static inline float wined3d_alpha_ref(const struct wined3d_state *state)
{
return (state->render_states[WINED3D_RS_ALPHAREF] & 0xff) / 255.0f;