diff --git a/dlls/wined3d/utils.c b/dlls/wined3d/utils.c
index 5f35ffbc77a..791ddf346c8 100644
--- a/dlls/wined3d/utils.c
+++ b/dlls/wined3d/utils.c
@@ -294,25 +294,29 @@ static void convert_r5g5_snorm_l6_unorm(const BYTE *src, BYTE *dst, UINT src_row
         UINT dst_row_pitch, UINT dst_slice_pitch, UINT width, UINT height, UINT depth)
 {
     unsigned int x, y, z;
-    const WORD *Source;
+    unsigned char r_in, g_in, l_in;
+    const unsigned short *texel_in;
+    unsigned short *texel_out;
 
+    /* Emulating signed 5 bit values with unsigned 5 bit values has some precision problems by design:
+     * E.g. the signed input value 0 becomes 16. GL normalizes it to 16 / 31 = 0.516. We convert it
+     * back to a signed value by subtracting 0.5 and multiplying by 2.0. The resulting value is
+     * ((16 / 31) - 0.5) * 2.0 = 0.032, which is quite different from the intended result 0.000. */
     for (z = 0; z < depth; z++)
     {
         for (y = 0; y < height; y++)
         {
-            unsigned short *Dest_s = (unsigned short *) (dst + z * dst_slice_pitch + y * dst_row_pitch);
-            Source = (const WORD *)(src + z * src_slice_pitch + y * src_row_pitch);
+            texel_out = (unsigned short *) (dst + z * dst_slice_pitch + y * dst_row_pitch);
+            texel_in = (const unsigned short *)(src + z * src_slice_pitch + y * src_row_pitch);
             for (x = 0; x < width; x++ )
             {
-                short color = (*Source++);
-                unsigned char l = ((color >> 10) & 0xfc);
-                short v = ((color >> 5) & 0x3e);
-                short u = ((color ) & 0x1f);
-                short v_conv = v + 16;
-                short u_conv = u + 16;
+                l_in = (*texel_in & 0xfc00) >> 10;
+                g_in = (*texel_in & 0x03e0) >> 5;
+                r_in = *texel_in & 0x001f;
 
-                *Dest_s = ((v_conv << 11) & 0xf800) | ((l << 5) & 0x7e0) | (u_conv & 0x1f);
-                Dest_s += 1;
+                *texel_out = ((r_in + 16) << 11) | (l_in << 5) | (g_in + 16);
+                texel_out++;
+                texel_in++;
             }
         }
     }
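
For reference, below is a minimal standalone sketch (not part of the patch) of how one 16-bit R5G5_SNORM_L6_UNORM texel can be repacked into a biased unsigned RGB565-style texel, following the bit layout used in the hunk above. The helper name repack_l6v5u5 and the "& 0x1f" wrap-around on the biased channels are assumptions added here for illustration; the patch itself writes the biased values without an explicit mask. The second printf reproduces the arithmetic from the comment in the patch.

/* Illustration only; names and the wrap-around mask are assumptions, not Wine code. */
#include <stdio.h>

static unsigned short repack_l6v5u5(unsigned short texel_in)
{
    unsigned char l_in = (texel_in & 0xfc00) >> 10; /* 6-bit unsigned luminance */
    unsigned char g_in = (texel_in & 0x03e0) >> 5;  /* 5-bit signed channel, raw two's complement bits */
    unsigned char r_in =  texel_in & 0x001f;        /* 5-bit signed channel, raw two's complement bits */

    /* Bias the signed channels into 0..31: signed 0 becomes 16, signed -16 becomes 0. */
    unsigned char r_out = (r_in + 16) & 0x1f;
    unsigned char g_out = (g_in + 16) & 0x1f;

    return (unsigned short)((r_out << 11) | (l_in << 5) | g_out);
}

int main(void)
{
    /* Sample texel: l = 0x20, signed g = -1 (raw bits 0x1f), signed r = 0. */
    unsigned short in = (unsigned short)((0x20 << 10) | (0x1f << 5) | 0x00);
    printf("repacked texel: 0x%04x\n", repack_l6v5u5(in));

    /* The precision loss described in the patch comment: biased signed 0 reads back as ~0.032. */
    printf("read-back of signed 0: %f\n", ((16.0 / 31.0) - 0.5) * 2.0);
    return 0;
}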