/*
 * Context and render target management in wined3d
 *
 * Copyright 2002-2004 Jason Edmeades
 * Copyright 2002-2004 Raphael Junqueira
 * Copyright 2004 Christian Costa
 * Copyright 2005 Oliver Stieber
 * Copyright 2006, 2008 Henri Verbeet
 * Copyright 2007-2011, 2013 Stefan Dösinger for CodeWeavers
 * Copyright 2009-2011 Henri Verbeet for CodeWeavers
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include "wined3d_private.h"

WINE_DEFAULT_DEBUG_CHANNEL(d3d);
WINE_DECLARE_DEBUG_CHANNEL(d3d_perf);

void context_resource_released(const struct wined3d_device *device, struct wined3d_resource *resource)
{
    unsigned int i;

    for (i = 0; i < device->context_count; ++i)
    {
        struct wined3d_context *context = device->contexts[i];

        if (&context->current_rt.texture->resource == resource)
        {
            context->current_rt.texture = NULL;
            context->current_rt.sub_resource_idx = 0;
        }
    }
}

void wined3d_context_cleanup(struct wined3d_context *context)
{
}

/* This is used when a context for render target A is active, but a separate
 * context is needed to access the WGL framebuffer for render target B.
 * Re-acquire a context for rt A to avoid breaking caller code. */
void context_restore(struct wined3d_context *context, struct wined3d_texture *texture,
        unsigned int sub_resource_idx)
{
    if (context->current_rt.texture != texture
            || context->current_rt.sub_resource_idx != sub_resource_idx)
    {
        context_release(context);
        context = context_acquire(texture->resource.device, texture, sub_resource_idx);
    }

    context_release(context);
}
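/* Graphics and compute states are tracked in per-context dirty bitmaps with
 * one bit per state representative. The invalidate helpers below set the
 * corresponding bit so that the state is reapplied on the next use of the
 * context. */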
void context_invalidate_compute_state(struct wined3d_context *context, DWORD state_id)
{
    DWORD representative = context->state_table[state_id].representative - STATE_COMPUTE_OFFSET;
    unsigned int index, shift;

    index = representative / (sizeof(*context->dirty_compute_states) * CHAR_BIT);
    shift = representative & (sizeof(*context->dirty_compute_states) * CHAR_BIT - 1);
    context->dirty_compute_states[index] |= (1u << shift);
}

void context_invalidate_state(struct wined3d_context *context, unsigned int state_id)
{
    unsigned int representative = context->state_table[state_id].representative;
    unsigned int index, shift;

    index = representative / (sizeof(*context->dirty_graphics_states) * CHAR_BIT);
    shift = representative & ((sizeof(*context->dirty_graphics_states) * CHAR_BIT) - 1);
    context->dirty_graphics_states[index] |= (1u << shift);
}

void wined3d_context_init(struct wined3d_context *context, struct wined3d_swapchain *swapchain)
{
    struct wined3d_device *device = swapchain->device;
    DWORD state;

    context->d3d_info = &device->adapter->d3d_info;
    context->state_table = device->state_table;

    /* Mark all states dirty to force a proper initialization of the states on
     * the first use of the context. Compute states do not need initialization. */
    for (state = 0; state <= STATE_HIGHEST; ++state)
    {
        if (context->state_table[state].representative && !STATE_IS_COMPUTE(state))
            context_invalidate_state(context, state);
    }

    context->device = device;
    context->swapchain = swapchain;
    context->current_rt.texture = swapchain->front_buffer;
    context->current_rt.sub_resource_idx = 0;

    context->shader_update_mask = (1u << WINED3D_SHADER_TYPE_PIXEL)
            | (1u << WINED3D_SHADER_TYPE_VERTEX)
            | (1u << WINED3D_SHADER_TYPE_GEOMETRY)
            | (1u << WINED3D_SHADER_TYPE_HULL)
            | (1u << WINED3D_SHADER_TYPE_DOMAIN)
            | (1u << WINED3D_SHADER_TYPE_COMPUTE);
}

HRESULT wined3d_context_no3d_init(struct wined3d_context *context_no3d, struct wined3d_swapchain *swapchain)
{
    TRACE("context_no3d %p, swapchain %p.\n", context_no3d, swapchain);

    wined3d_context_init(context_no3d, swapchain);

    return WINED3D_OK;
}

static BOOL fixed_get_input(BYTE usage, BYTE usage_idx, unsigned int *regnum)
{
    if ((usage == WINED3D_DECL_USAGE_POSITION || usage == WINED3D_DECL_USAGE_POSITIONT) && !usage_idx)
        *regnum = WINED3D_FFP_POSITION;
    else if (usage == WINED3D_DECL_USAGE_BLEND_WEIGHT && !usage_idx)
        *regnum = WINED3D_FFP_BLENDWEIGHT;
    else if (usage == WINED3D_DECL_USAGE_BLEND_INDICES && !usage_idx)
        *regnum = WINED3D_FFP_BLENDINDICES;
    else if (usage == WINED3D_DECL_USAGE_NORMAL && !usage_idx)
        *regnum = WINED3D_FFP_NORMAL;
    else if (usage == WINED3D_DECL_USAGE_PSIZE && !usage_idx)
        *regnum = WINED3D_FFP_PSIZE;
    else if (usage == WINED3D_DECL_USAGE_COLOR && !usage_idx)
        *regnum = WINED3D_FFP_DIFFUSE;
    else if (usage == WINED3D_DECL_USAGE_COLOR && usage_idx == 1)
        *regnum = WINED3D_FFP_SPECULAR;
    else if (usage == WINED3D_DECL_USAGE_TEXCOORD && usage_idx < WINED3DDP_MAXTEXCOORD)
        *regnum = WINED3D_FFP_TEXCOORD0 + usage_idx;
    else
    {
        WARN("Unsupported input stream [usage=%s, usage_idx=%u].\n", debug_d3ddeclusage(usage), usage_idx);
        *regnum = ~0u;
        return FALSE;
    }

    return TRUE;
}
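/* Build a wined3d_stream_info from the bound vertex declaration: each
 * declaration element that is backed by a bound stream is mapped either to a
 * vertex shader input register or to a fixed function attribute index. */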
/* Context activation is done by the caller. */
void wined3d_stream_info_from_declaration(struct wined3d_stream_info *stream_info,
        const struct wined3d_state *state, const struct wined3d_d3d_info *d3d_info)
{
    /* We need to deal with frequency data! */
    struct wined3d_vertex_declaration *declaration = state->vertex_declaration;
    BOOL generic_attributes = d3d_info->ffp_generic_attributes;
    BOOL use_vshader = use_vs(state);
    unsigned int i;

    stream_info->use_map = 0;
    stream_info->swizzle_map = 0;
    stream_info->position_transformed = 0;

    if (!declaration)
        return;

    stream_info->position_transformed = declaration->position_transformed;

    /* Translate the declaration into strided data. */
    for (i = 0; i < declaration->element_count; ++i)
    {
        const struct wined3d_vertex_declaration_element *element = &declaration->elements[i];
        const struct wined3d_stream_state *stream = &state->streams[element->input_slot];
        BOOL stride_used;
        unsigned int idx;

        TRACE("%p Element %p (%u of %u).\n", declaration->elements,
                element, i + 1, declaration->element_count);

        if (!stream->buffer)
            continue;

        TRACE("offset %u input_slot %u usage_idx %d.\n", element->offset, element->input_slot, element->usage_idx);

        if (use_vshader)
        {
            if (element->output_slot == WINED3D_OUTPUT_SLOT_UNUSED)
            {
                stride_used = FALSE;
            }
            else if (element->output_slot == WINED3D_OUTPUT_SLOT_SEMANTIC)
            {
                /* TODO: Assuming vertexdeclarations are usually used with the
                 * same or a similar shader, it might be worth it to store the
                 * last used output slot and try that one first. */
                stride_used = vshader_get_input(state->shader[WINED3D_SHADER_TYPE_VERTEX],
                        element->usage, element->usage_idx, &idx);
            }
            else
            {
                idx = element->output_slot;
                stride_used = TRUE;
            }
        }
        else
        {
            if (!generic_attributes && !element->ffp_valid)
            {
                WARN("Skipping unsupported fixed function element of format %s and usage %s.\n",
                        debug_d3dformat(element->format->id), debug_d3ddeclusage(element->usage));
                stride_used = FALSE;
            }
            else
            {
                stride_used = fixed_get_input(element->usage, element->usage_idx, &idx);
            }
        }

        if (stride_used)
        {
            TRACE("Load %s array %u [usage %s, usage_idx %u, "
                    "input_slot %u, offset %u, stride %u, format %s, class %s, step_rate %u].\n",
                    use_vshader ? "shader": "fixed function", idx,
                    debug_d3ddeclusage(element->usage), element->usage_idx, element->input_slot,
                    element->offset, stream->stride, debug_d3dformat(element->format->id),
                    debug_d3dinput_classification(element->input_slot_class),
                    element->instance_data_step_rate);

            stream_info->elements[idx].format = element->format;
            stream_info->elements[idx].data.buffer_object = 0;
            stream_info->elements[idx].data.addr = (BYTE *)NULL + stream->offset + element->offset;
            stream_info->elements[idx].stride = stream->stride;
            stream_info->elements[idx].stream_idx = element->input_slot;

            if (stream->flags & WINED3DSTREAMSOURCE_INSTANCEDATA)
            {
                stream_info->elements[idx].divisor = 1;
            }
            else if (element->input_slot_class == WINED3D_INPUT_PER_INSTANCE_DATA)
            {
                stream_info->elements[idx].divisor = element->instance_data_step_rate;
                if (!element->instance_data_step_rate)
                    FIXME("Instance step rate 0 not implemented.\n");
            }
            else
            {
                stream_info->elements[idx].divisor = 0;
            }

            if (!d3d_info->vertex_bgra && element->format->id == WINED3DFMT_B8G8R8A8_UNORM)
            {
                stream_info->swizzle_map |= 1u << idx;
            }
            stream_info->use_map |= 1u << idx;
        }
    }
}
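/* Refresh the context's cached stream info from the current state, load the
 * vertex buffers, and decide whether the draw can source all attributes from
 * buffer objects or needs the immediate mode fallback. */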
/* Context activation is done by the caller. */
void context_update_stream_info(struct wined3d_context *context, const struct wined3d_state *state)
{
    struct wined3d_stream_info *stream_info = &context->stream_info;
    const struct wined3d_d3d_info *d3d_info = context->d3d_info;
    DWORD prev_all_vbo = stream_info->all_vbo;
    unsigned int i;
    WORD map;

    wined3d_stream_info_from_declaration(stream_info, state, d3d_info);

    stream_info->all_vbo = 1;
    for (i = 0, map = stream_info->use_map; map; map >>= 1, ++i)
    {
        struct wined3d_stream_info_element *element;
        struct wined3d_bo_address data;
        struct wined3d_buffer *buffer;

        if (!(map & 1))
            continue;

        element = &stream_info->elements[i];
        buffer = state->streams[element->stream_idx].buffer;

        /* We can't use VBOs if the base vertex index is negative. OpenGL
         * doesn't accept negative offsets (or rather offsets bigger than the
         * VBO, because the pointer is unsigned), so use system memory
         * sources. In most sane cases the pointer - offset will still be > 0,
         * otherwise it will wrap around to some big value. Hope that with the
         * indices the driver wraps it back internally. If not,
         * draw_primitive_immediate_mode() is needed, including a vertex
         * buffer path. */
        if (state->load_base_vertex_index < 0)
        {
            WARN_(d3d_perf)("load_base_vertex_index is < 0 (%d), not using VBOs.\n",
                    state->load_base_vertex_index);
            element->data.buffer_object = 0;
            element->data.addr += (ULONG_PTR)wined3d_buffer_load_sysmem(buffer, context);
            if ((UINT_PTR)element->data.addr < -state->load_base_vertex_index * element->stride)
                FIXME("System memory vertex data load offset is negative!\n");
        }
        else
        {
            wined3d_buffer_load(buffer, context, state);
            wined3d_buffer_get_memory(buffer, &data, buffer->locations);
            element->data.buffer_object = data.buffer_object;
            element->data.addr += (ULONG_PTR)data.addr;
        }

        if (!element->data.buffer_object)
            stream_info->all_vbo = 0;

        TRACE("Load array %u %s.\n", i, debug_bo_address(&element->data));
    }

    if (prev_all_vbo != stream_info->all_vbo)
        context_invalidate_state(context, STATE_INDEXBUFFER);

    context->use_immediate_mode_draw = FALSE;

    if (stream_info->all_vbo)
        return;

    if (!use_vs(state))
    {
        WORD slow_mask = -!d3d_info->ffp_generic_attributes & (1u << WINED3D_FFP_PSIZE);
        slow_mask |= -(!d3d_info->vertex_bgra && !d3d_info->ffp_generic_attributes)
                & ((1u << WINED3D_FFP_DIFFUSE) | (1u << WINED3D_FFP_SPECULAR) | (1u << WINED3D_FFP_BLENDWEIGHT));

        if ((stream_info->position_transformed && !d3d_info->xyzrhw)
                || (stream_info->use_map & slow_mask))
            context->use_immediate_mode_draw = TRUE;
    }
}

/* Context activation is done by the caller. */
static void context_preload_texture(struct wined3d_context *context,
        const struct wined3d_state *state, unsigned int idx)
{
    struct wined3d_texture *texture;

    if (!(texture = state->textures[idx]))
        return;

    if ((texture->resource.rtv_full_bind_count_device + texture->resource.rtv_partial_bind_count_device)
            || (state->fb.depth_stencil && state->fb.depth_stencil->resource == &texture->resource))
        context->uses_fbo_attached_resources = 1;

    wined3d_texture_load(texture, context, is_srgb_enabled(state->sampler_states[idx]));
}
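/* Preload all textures that may be sampled by the current draw: vertex
 * shader samplers, pixel shader samplers, or the fixed function texture
 * stages when no pixel shader is used. */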
/* Context activation is done by the caller. */
void context_preload_textures(struct wined3d_context *context, const struct wined3d_state *state)
{
    unsigned int i;

    if (use_vs(state))
    {
        for (i = 0; i < WINED3D_MAX_VERTEX_SAMPLERS; ++i)
        {
            if (state->shader[WINED3D_SHADER_TYPE_VERTEX]->reg_maps.resource_info[i].type)
                context_preload_texture(context, state, WINED3D_MAX_FRAGMENT_SAMPLERS + i);
        }
    }

    if (use_ps(state))
    {
        for (i = 0; i < WINED3D_MAX_FRAGMENT_SAMPLERS; ++i)
        {
            if (state->shader[WINED3D_SHADER_TYPE_PIXEL]->reg_maps.resource_info[i].type)
                context_preload_texture(context, state, i);
        }
    }
    else
    {
        WORD ffu_map = context->fixed_function_usage_map;

        for (i = 0; ffu_map; ffu_map >>= 1, ++i)
        {
            if (ffu_map & 1)
                context_preload_texture(context, state, i);
        }
    }
}

void context_load_shader_resources(struct wined3d_context *context, const struct wined3d_state *state,
        unsigned int shader_mask)
{
    struct wined3d_shader_sampler_map_entry *entry;
    struct wined3d_shader_resource_view *view;
    struct wined3d_shader *shader;
    unsigned int i, j;

    for (i = 0; i < WINED3D_SHADER_TYPE_COUNT; ++i)
    {
        if (!(shader_mask & (1u << i)))
            continue;

        if (!(shader = state->shader[i]))
            continue;

        for (j = 0; j < WINED3D_MAX_CBS; ++j)
        {
            if (state->cb[i][j])
                wined3d_buffer_load(state->cb[i][j], context, state);
        }

        for (j = 0; j < shader->reg_maps.sampler_map.count; ++j)
        {
            entry = &shader->reg_maps.sampler_map.entries[j];

            if (!(view = state->shader_resource_view[i][entry->resource_idx]))
                continue;

            if (view->resource->type == WINED3D_RTYPE_BUFFER)
                wined3d_buffer_load(buffer_from_resource(view->resource), context, state);
            else
                wined3d_texture_load(texture_from_resource(view->resource), context, FALSE);
        }
    }
}

void context_load_unordered_access_resources(struct wined3d_context *context,
        const struct wined3d_shader *shader, struct wined3d_unordered_access_view * const *views)
{
    struct wined3d_unordered_access_view *view;
    struct wined3d_texture *texture;
    struct wined3d_buffer *buffer;
    unsigned int i;

    context->uses_uavs = 0;

    if (!shader)
        return;

    for (i = 0; i < MAX_UNORDERED_ACCESS_VIEWS; ++i)
    {
        if (!(view = views[i]))
            continue;

        if (view->resource->type == WINED3D_RTYPE_BUFFER)
        {
            buffer = buffer_from_resource(view->resource);
            wined3d_buffer_load_location(buffer, context, WINED3D_LOCATION_BUFFER);
            wined3d_unordered_access_view_invalidate_location(view, ~WINED3D_LOCATION_BUFFER);
        }
        else
        {
            texture = texture_from_resource(view->resource);
            wined3d_texture_load(texture, context, FALSE);
            wined3d_unordered_access_view_invalidate_location(view, ~WINED3D_LOCATION_TEXTURE_RGB);
        }

        context->uses_uavs = 1;
    }
}

void context_load_stream_output_buffers(struct wined3d_context *context, const struct wined3d_state *state)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(state->stream_output); ++i)
    {
        struct wined3d_buffer *buffer;

        if (!(buffer = state->stream_output[i].buffer))
            continue;

        wined3d_buffer_load(buffer, context, state);
        wined3d_buffer_invalidate_location(buffer, ~WINED3D_LOCATION_BUFFER);
    }
}