diff --git a/dlls/wined3d/arb_program_shader.c b/dlls/wined3d/arb_program_shader.c
index e48303306f4..79148e5eef8 100644
--- a/dlls/wined3d/arb_program_shader.c
+++ b/dlls/wined3d/arb_program_shader.c
@@ -693,17 +693,6 @@ void pshader_hw_texreg2gb(SHADER_OPCODE_ARG* arg) {
 }
 
 void pshader_hw_texbem(SHADER_OPCODE_ARG* arg) {
-#if 0
-    SHADER_BUFFER* buffer = arg->buffer;
-    DWORD reg1 = arg->dst & WINED3DSP_REGNUM_MASK;
-    DWORD reg2 = arg->src[0] & WINED3DSP_REGNUM_MASK;
-    char dst_str[8];
-
-    /* FIXME: Should apply the BUMPMAPENV matrix */
-    sprintf(dst_str, "T%u", reg1);
-    shader_addline(buffer, "ADD TMP.rg, fragment.texcoord[%u], T%u;\n", reg1, reg2);
-    shader_hw_sample(arg, reg1, dst_str, "TMP");
-#endif
     IWineD3DPixelShaderImpl* This = (IWineD3DPixelShaderImpl*) arg->shader;
 
     DWORD dst = arg->dst;
@@ -720,6 +709,17 @@ void pshader_hw_texbem(SHADER_OPCODE_ARG* arg) {
 
     if(This->bumpenvmatconst) {
         /*shader_addline(buffer, "MOV T%u, fragment.texcoord[%u];\n", 1, 1); Not needed - done already */
+
+        /* Plain GL does not have any signed formats suitable for that instruction.
+         * So the surface loading code converts the -128 ... 127 signed integers to
+         * 0 ... 255 unsigned ones. The following line undoes that.
+         *
+         * TODO: Both GL_NV_texture_shader and GL_ATI_envmap_bumpmap provide pixel formats
+         * suitable for loading the Direct3D perturbation data. If one of them is used, do
+         * not correct the signedness
+         */
+        shader_addline(buffer, "MAD T%u, T%u, coefmul.x, -one;\n", src, src);
+
         shader_addline(buffer, "SWZ TMP2, bumpenvmat, x, z, 0, 0;\n");
         shader_addline(buffer, "DP3 TMP.r, TMP2, T%u;\n", src);
         shader_addline(buffer, "SWZ TMP2, bumpenvmat, y, w, 0, 0;\n");
diff --git a/dlls/wined3d/surface.c b/dlls/wined3d/surface.c
index 95b7cc78c55..2a1cc60e009 100644
--- a/dlls/wined3d/surface.c
+++ b/dlls/wined3d/surface.c
@@ -1451,6 +1451,10 @@ HRESULT d3dfmt_get_conv(IWineD3DSurfaceImpl *This, BOOL need_alpha_ck, BOOL use_
             break;
 
         case WINED3DFMT_V8U8:
+            /* TODO: GL_NV_texture_shader and GL_ATI_envmap_bumpmap provide suitable formats.
+             * use one of them instead of converting
+             * Remember to adjust the texbem instruction in the shader
+             */
             *convert = CONVERT_V8U8;
             *format = GL_BGR;
             *internal = GL_RGB8;
@@ -1581,15 +1585,15 @@ HRESULT d3dfmt_convert_surface(BYTE *src, BYTE *dst, UINT pitch, UINT width, UIN
         {
             unsigned int x, y;
            short *Source;
-            char *Dest;
+            unsigned char *Dest;
            for(y = 0; y < height; y++) {
                Source = (short *) (src + y * pitch);
-                Dest = (char *) (dst + y * outpitch);
+                Dest = (unsigned char *) (dst + y * outpitch);
                for (x = 0; x < width; x++ ) {
                    long color = (*Source++);
-                    Dest[0] = color >> 8;
-                    Dest[1] = color;
-                    Dest[2] = 0xff;
+                    /* B */ Dest[0] = 0xff;
+                    /* G */ Dest[1] = (color >> 8) + 128; /* V */
+                    /* R */ Dest[2] = (color) + 128; /* U */
                    Dest += 3;
                }
            }