Added support for managing reserved memory areas in libwine and ntdll.

Try to reserve everything above 0x80000000 on startup.
Alexandre Julliard 2004-05-25 01:29:24 +00:00
parent 307edcca15
commit 94d74b5fed
10 changed files with 590 additions and 229 deletions
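The idea: at startup everything above the 2GB boundary (apart from a window around the Unix stack) is mapped PROT_NONE and recorded in a list kept by libwine, so that ntdll can later hand pieces of it out with MAP_FIXED. A simplified, hypothetical sketch of that idea; the real implementation is reserve_area()/mmap_init() in libs/wine/mmap.c below, which splits the range and retries when a mapping fails:

#include <sys/mman.h>
#include "wine/library.h"

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

/* hypothetical helper, not part of this commit */
static void reserve_upper_half(void)
{
    char  *base = (char *)0x80000000;   /* top of the user address space */
    size_t size = 0x7fff0000;           /* up to 0xffff0000, stopping short of the 4GB wrap */

    /* claim the range with an inaccessible placeholder mapping ... */
    if (wine_anon_mmap( base, size, PROT_NONE, MAP_NORESERVE ) == (void *)base)
        /* ... and record it so ntdll can later replace pieces of it with MAP_FIXED */
        wine_mmap_add_reserved_area( base, size );
}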


@@ -1116,8 +1116,6 @@ void __wine_kernel_init(void)
}
found:
wine_free_pe_load_area(); /* the main binary is loaded, we don't need this anymore */
/* build command line */
set_library_wargv( __wine_main_argv );
if (!build_command_line( __wine_main_wargv )) goto error;


@@ -57,6 +57,10 @@ WINE_DECLARE_DEBUG_CHANNEL(module);
#define MS_SYNC 0
#endif
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
/* File view */
typedef struct file_view
{
@@ -112,16 +116,15 @@ static CRITICAL_SECTION csVirtual = { &critsect_debug, -1, 0, 0, 0, 0 };
# define page_mask 0xfff
# define page_shift 12
# define page_size 0x1000
/* Note: ADDRESS_SPACE_LIMIT is a Windows limit, you cannot change it.
* If you are on Solaris you need to find a way to avoid having the system
* allocate things above 0xc000000. Don't touch that define.
*/
# define ADDRESS_SPACE_LIMIT ((void *)0xc0000000) /* top of the user address space */
/* Note: these are Windows limits, you cannot change them. */
# define ADDRESS_SPACE_LIMIT ((void *)0xc0000000) /* top of the total available address space */
# define USER_SPACE_LIMIT ((void *)0x80000000) /* top of the user address space */
#else
static UINT page_shift;
static UINT page_mask;
static UINT page_size;
# define ADDRESS_SPACE_LIMIT 0 /* no limit needed on other platforms */
# define USER_SPACE_LIMIT 0 /* no limit needed on other platforms */
#endif /* __i386__ */
#define granularity_mask 0xffff /* Allocation granularity (usually 64k) */
@@ -219,24 +222,88 @@ static struct file_view *VIRTUAL_FindView( const void *addr ) /* [in] Address */
{
struct file_view *view = LIST_ENTRY( ptr, struct file_view, entry );
if (view->base > addr) break;
if ((char*)view->base + view->size > (char*)addr) return view;
if ((char *)view->base + view->size > (const char *)addr) return view;
}
return NULL;
}
/***********************************************************************
* VIRTUAL_DeleteView
* find_view_range
*
* Find the first view overlapping at least part of the specified range.
* The csVirtual section must be held by caller.
*/
static struct file_view *find_view_range( const void *addr, size_t size )
{
struct list *ptr;
LIST_FOR_EACH( ptr, &views_list )
{
struct file_view *view = LIST_ENTRY( ptr, struct file_view, entry );
if ((char *)view->base >= (const char *)addr + size) break;
if ((char *)view->base + view->size > (const char *)addr) return view;
}
return NULL;
}
/***********************************************************************
* add_reserved_area
*
* Add a reserved area to the list maintained by libwine.
* The csVirtual section must be held by caller.
*/
static void add_reserved_area( void *addr, size_t size )
{
TRACE( "adding %p-%p\n", addr, (char *)addr + size );
if (addr < USER_SPACE_LIMIT)
{
/* unmap the part of the area that is below the limit */
assert( (char *)addr + size > (char *)USER_SPACE_LIMIT );
munmap( addr, (char *)USER_SPACE_LIMIT - (char *)addr );
size -= (char *)USER_SPACE_LIMIT - (char *)addr;
addr = USER_SPACE_LIMIT;
}
wine_mmap_add_reserved_area( addr, size );
}
/***********************************************************************
* is_beyond_limit
*
* Check if an address range goes beyond a given limit.
*/
static inline int is_beyond_limit( void *addr, size_t size, void *limit )
{
return (limit && (addr >= limit || (char *)addr + size > (char *)limit));
}
/***********************************************************************
* unmap_area
*
* Unmap an area, or simply replace it by an empty mapping if it is
* in a reserved area. The csVirtual section must be held by caller.
*/
static inline void unmap_area( void *addr, size_t size )
{
if (wine_mmap_is_in_reserved_area( addr, size ))
wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE | MAP_FIXED );
else
munmap( addr, size );
}
/***********************************************************************
* delete_view
*
* Deletes a view. The csVirtual section must be held by caller.
*
* RETURNS
* None
*/
static void VIRTUAL_DeleteView( struct file_view *view ) /* [in] View */
static void delete_view( struct file_view *view ) /* [in] View */
{
if (!(view->flags & VFLAG_SYSTEM))
munmap( (void *)view->base, view->size );
if (!(view->flags & VFLAG_SYSTEM)) unmap_area( view->base, view->size );
list_remove( &view->entry );
if (view->mapping) NtClose( view->mapping );
free( view );
@@ -290,7 +357,7 @@ static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t siz
prev->base, (char *)prev->base + prev->size,
base, (char *)base + view->size );
assert( prev->flags & VFLAG_SYSTEM );
VIRTUAL_DeleteView( prev );
delete_view( prev );
}
}
if ((ptr = list_next( &views_list, &view->entry )) != NULL)
@@ -302,7 +369,7 @@ static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t siz
next->base, (char *)next->base + next->size,
base, (char *)base + view->size );
assert( next->flags & VFLAG_SYSTEM );
VIRTUAL_DeleteView( next );
delete_view( next );
}
}
@@ -450,26 +517,52 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
if (base)
{
if ((ptr = wine_anon_mmap( base, size, VIRTUAL_GetUnixProt(vprot), 0 )) == (void *)-1)
if (is_beyond_limit( base, size, ADDRESS_SPACE_LIMIT ))
return STATUS_WORKING_SET_LIMIT_RANGE;
switch (wine_mmap_is_in_reserved_area( base, size ))
{
if (errno == ENOMEM) return STATUS_NO_MEMORY;
return STATUS_INVALID_PARAMETER;
}
if (ptr != base)
{
/* We couldn't get the address we wanted */
munmap( ptr, size );
case -1: /* partially in a reserved area */
return STATUS_CONFLICTING_ADDRESSES;
case 0: /* not in a reserved area, do a normal allocation */
if ((ptr = wine_anon_mmap( base, size, VIRTUAL_GetUnixProt(vprot), 0 )) == (void *)-1)
{
if (errno == ENOMEM) return STATUS_NO_MEMORY;
return STATUS_INVALID_PARAMETER;
}
if (ptr != base)
{
/* We couldn't get the address we wanted */
if (is_beyond_limit( ptr, size, USER_SPACE_LIMIT )) add_reserved_area( ptr, size );
else munmap( ptr, size );
return STATUS_CONFLICTING_ADDRESSES;
}
break;
default:
case 1: /* in a reserved area, make sure the address is available */
if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
/* replace the reserved area by our mapping */
if ((ptr = wine_anon_mmap( base, size, VIRTUAL_GetUnixProt(vprot), MAP_FIXED )) != base)
return STATUS_INVALID_PARAMETER;
break;
}
}
else
{
size_t view_size = size + granularity_mask + 1;
if ((ptr = wine_anon_mmap( NULL, view_size, VIRTUAL_GetUnixProt(vprot), 0 )) == (void *)-1)
for (;;)
{
if (errno == ENOMEM) return STATUS_NO_MEMORY;
return STATUS_INVALID_PARAMETER;
if ((ptr = wine_anon_mmap( NULL, view_size, VIRTUAL_GetUnixProt(vprot), 0 )) == (void *)-1)
{
if (errno == ENOMEM) return STATUS_NO_MEMORY;
return STATUS_INVALID_PARAMETER;
}
/* if we got something beyond the user limit, unmap it and retry */
if (is_beyond_limit( ptr, view_size, USER_SPACE_LIMIT )) add_reserved_area( ptr, view_size );
else break;
}
/* Release the extra memory while keeping the range
@@ -486,7 +579,7 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
}
status = create_view( view_ret, ptr, size, vprot );
if (status != STATUS_SUCCESS) munmap( ptr, size );
if (status != STATUS_SUCCESS) unmap_area( ptr, size );
return status;
}
@@ -885,7 +978,7 @@ static NTSTATUS map_image( HANDLE hmapping, int fd, char *base, DWORD total_size
return STATUS_SUCCESS;
error:
if (view) VIRTUAL_DeleteView( view );
if (view) delete_view( view );
RtlLeaveCriticalSection( &csVirtual );
return status;
}
@@ -1040,7 +1133,7 @@ NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, PVOID addr,
/* disallow low 64k, wrap-around and kernel space */
if (((char *)base <= (char *)granularity_mask) ||
((char *)base + size < (char *)base) ||
(ADDRESS_SPACE_LIMIT && ((char *)base + size > (char *)ADDRESS_SPACE_LIMIT)))
is_beyond_limit( base, size, ADDRESS_SPACE_LIMIT ))
return STATUS_INVALID_PARAMETER;
}
else
@@ -1148,7 +1241,7 @@ NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, ULONG *siz
*addr_ptr = view->base;
*size_ptr = view->size;
view->flags |= VFLAG_SYSTEM;
VIRTUAL_DeleteView( view );
delete_view( view );
}
else if (type == MEM_RELEASE)
{
@@ -1157,7 +1250,7 @@ NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, ULONG *siz
if (size || (base != view->base)) status = STATUS_INVALID_PARAMETER;
else
{
VIRTUAL_DeleteView( view );
delete_view( view );
*addr_ptr = base;
*size_ptr = size;
}
@@ -1264,7 +1357,7 @@ NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
if (info_class != MemoryBasicInformation) return STATUS_INVALID_INFO_CLASS;
if (ADDRESS_SPACE_LIMIT && addr >= ADDRESS_SPACE_LIMIT)
return STATUS_WORKING_SET_LIMIT_RANGE; /* FIXME */
return STATUS_WORKING_SET_LIMIT_RANGE;
if (!is_current_process( process ))
{
@@ -1282,7 +1375,18 @@ NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
{
if (!ptr)
{
size = (char *)ADDRESS_SPACE_LIMIT - alloc_base;
/* make the address space end at the user limit, except if
* the last view was mapped beyond that */
if (alloc_base < (char *)USER_SPACE_LIMIT)
{
if (USER_SPACE_LIMIT && base >= (char *)USER_SPACE_LIMIT)
{
RtlLeaveCriticalSection( &csVirtual );
return STATUS_WORKING_SET_LIMIT_RANGE;
}
size = (char *)USER_SPACE_LIMIT - alloc_base;
}
else size = (char *)ADDRESS_SPACE_LIMIT - alloc_base;
view = NULL;
break;
}
@@ -1585,7 +1689,7 @@ NTSTATUS WINAPI NtMapViewOfSection( HANDLE handle, HANDLE process, PVOID *addr_p
{
ERR( "map_file_into_view %p %x %lx%08lx failed\n",
view->base, size, offset->u.HighPart, offset->u.LowPart );
VIRTUAL_DeleteView( view );
delete_view( view );
}
RtlLeaveCriticalSection( &csVirtual );
@@ -1614,7 +1718,7 @@ NTSTATUS WINAPI NtUnmapViewOfSection( HANDLE process, PVOID addr )
RtlEnterCriticalSection( &csVirtual );
if ((view = VIRTUAL_FindView( base )) && (base == view->base))
{
VIRTUAL_DeleteView( view );
delete_view( view );
status = STATUS_SUCCESS;
}
RtlLeaveCriticalSection( &csVirtual );


@@ -70,10 +70,16 @@ extern int wine_dbg_parse_options( const char *str );
/* portability */
extern void DECLSPEC_NORETURN wine_switch_to_stack( void (*func)(void *), void *arg, void *stack );
extern void *wine_anon_mmap( void *start, size_t size, int prot, int flags );
extern void wine_set_pe_load_area( void *base, size_t size );
extern void wine_free_pe_load_area(void);
/* memory mappings */
extern void *wine_anon_mmap( void *start, size_t size, int prot, int flags );
extern void wine_mmap_add_reserved_area( void *addr, size_t size );
extern void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap );
extern int wine_mmap_is_in_reserved_area( void *addr, size_t size );
/* LDT management */
extern void wine_ldt_init_locking( void (*lock_func)(void), void (*unlock_func)(void) );
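A hedged usage sketch for the three new wine_mmap_* entry points declared above; the address, size and helper name here are illustrative only, not taken from the commit:

#include "wine/library.h"

/* hypothetical helper, not part of this commit */
static void reserved_area_example(void)
{
    void  *base = (void *)0x90000000;   /* illustrative address */
    size_t size = 0x00100000;           /* illustrative size (1MB) */

    /* record the range; libwine blows away whatever was mapped there
     * and replaces it with a PROT_NONE placeholder */
    wine_mmap_add_reserved_area( base, size );

    /* 1 = fully inside a reserved area, 0 = not reserved, -1 = partial overlap */
    if (wine_mmap_is_in_reserved_area( base, size ) == 1)
    {
        /* the range is ours: it can safely be remapped with MAP_FIXED */
    }

    /* drop the range from the list; a non-zero last argument also unmaps it */
    wine_mmap_remove_reserved_area( base, size, 1 );
}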


@@ -13,6 +13,7 @@ C_SRCS = \
debug.c \
ldt.c \
loader.c \
mmap.c \
port.c
@MAKE_LIB_RULES@


@@ -71,6 +71,7 @@ static const char **dll_paths;
static int nb_dll_paths;
static int dll_path_maxlen;
extern void mmap_init(void);
/* build the dll load path from the WINEDLLPATH variable */
static void build_dll_path(void)
@@ -516,6 +517,7 @@ void wine_init( int argc, char *argv[], char *error, int error_size )
__wine_main_argc = argc;
__wine_main_argv = argv;
__wine_main_environ = environ;
mmap_init();
if ((wine_debug = getenv("WINEDEBUG")))
{

libs/wine/mmap.c (new file, 435 lines added)

@@ -0,0 +1,435 @@
/*
* Wine memory mappings support
*
* Copyright 2000, 2004 Alexandre Julliard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <ctype.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif
#include "wine/library.h"
#include "wine/list.h"
struct reserved_area
{
struct list entry;
void *base;
size_t size;
};
static struct list reserved_areas = LIST_INIT(reserved_areas);
static const int granularity_mask = 0xffff; /* reserved areas have 64k granularity */
#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef HAVE_MMAP
static inline int munmap( void *ptr, size_t size ) { return 0; }
#endif
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
* try_mmap_fixed
*
* The purpose of this routine is to emulate the behaviour of
* the Linux mmap() routine if a non-NULL address is passed,
* but the MAP_FIXED flag is not set. Linux in this case tries
* to place the mapping at the specified address, *unless* the
* range is already in use. Solaris, however, completely ignores
* the address argument in this case.
*
* As Wine code occasionally relies on the Linux behaviour, e.g. to
* be able to map non-relocateable PE executables to their proper
* start addresses, or to map the DOS memory to 0, this routine
* emulates the Linux behaviour by checking whether the desired
* address range is still available, and placing the mapping there
* using MAP_FIXED if so.
*/
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
int fildes, off_t off)
{
char * volatile result = NULL;
int pagesize = getpagesize();
pid_t pid;
/* We only try to map to a fixed address if
addr is non-NULL and properly aligned,
and MAP_FIXED isn't already specified. */
if ( !addr )
return 0;
if ( (uintptr_t)addr & (pagesize-1) )
return 0;
if ( flags & MAP_FIXED )
return 0;
/* We use vfork() to freeze all threads of the
current process. This allows us to check without
race condition whether the desired memory range is
already in use. Note that because vfork() shares
the address spaces between parent and child, we
can actually perform the mapping in the child. */
if ( (pid = vfork()) == -1 )
{
perror("try_mmap_fixed: vfork");
exit(1);
}
if ( pid == 0 )
{
int i;
char vec;
/* We call mincore() for every page in the desired range.
If any of these calls succeeds, the page is already
mapped and we must fail. */
for ( i = 0; i < len; i += pagesize )
if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
_exit(1);
/* Perform the mapping with MAP_FIXED set. This is safe
now, as none of the pages is currently in use. */
result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
if ( result == addr )
_exit(0);
if ( result != (void *) -1 ) /* This should never happen ... */
munmap( result, len );
_exit(1);
}
/* vfork() lets the parent continue only after the child
has exited. Furthermore, Wine sets SIGCHLD to SIG_IGN,
so we don't need to wait for the child. */
return result == addr;
}
#endif /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
* wine_anon_mmap
*
* Portable wrapper for anonymous mmaps
*/
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef HAVE_MMAP
static int fdzero = -1;
#ifdef MAP_ANON
flags |= MAP_ANON;
#else
if (fdzero == -1)
{
if ((fdzero = open( "/dev/zero", O_RDONLY )) == -1)
{
perror( "/dev/zero: open" );
exit(1);
}
}
#endif /* MAP_ANON */
#ifdef MAP_SHARED
flags &= ~MAP_SHARED;
#endif
/* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
#ifdef MAP_PRIVATE
flags |= MAP_PRIVATE;
#endif
if (!(flags & MAP_FIXED))
{
#ifdef MAP_TRYFIXED
/* If available, this will attempt a fixed mapping in-kernel */
flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__)
if ( try_mmap_fixed( start, size, prot, flags, fdzero, 0 ) )
return start;
#endif
}
return mmap( start, size, prot, flags, fdzero, 0 );
#else
return (void *)-1;
#endif
}
#ifdef __i386__
/***********************************************************************
* reserve_area
*
* Reserve as much memory as possible in the given area.
* FIXME: probably needs a different algorithm for Solaris
*/
static void reserve_area( void *addr, void *end )
{
void *ptr;
size_t size = (char *)end - (char *)addr;
struct list *prev;
struct reserved_area *area;
if ((ptr = wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE )) != (void *)-1)
{
if (ptr == addr)
{
if (!end) size--; /* avoid wrap-around */
/* try to merge it with the previous one */
if ((prev = list_tail( &reserved_areas )))
{
area = LIST_ENTRY( prev, struct reserved_area, entry );
if (area && (char *)area->base + area->size == (char *)ptr)
{
area->size += size;
return;
}
}
/* create a new area */
if ((area = malloc( sizeof(*area) )))
{
area->base = addr;
area->size = size;
list_add_tail( &reserved_areas, &area->entry );
return;
}
}
else munmap( ptr, size );
}
if (size > granularity_mask + 1)
{
size_t new_size = (size / 2) & ~granularity_mask;
reserve_area( addr, (char *)addr + new_size );
reserve_area( (char *)addr + new_size, end );
}
}
/***********************************************************************
* mmap_init
*/
void mmap_init(void)
{
static char * const user_space_limit = (char *)0x80000000;
char stack;
char * const stack_ptr = &stack;
if (stack_ptr >= user_space_limit)
{
char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
if (base > user_space_limit) reserve_area( user_space_limit, base );
base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#ifdef linux
/* Linux heuristic: if the stack top is at c0000000, assume the address space */
/* ends there, this avoids a lot of futile allocation attempts */
if (base != (char *)0xc0000000)
#endif
reserve_area( base, 0 );
}
else reserve_area( user_space_limit, 0 );
}
#else /* __i386__ */
void mmap_init(void)
{
}
#endif
/***********************************************************************
* wine_mmap_add_reserved_area
*
* Add an address range to the list of reserved areas.
* Caller must have made sure the range is not used by anything else.
*
* Note: the reserved areas functions are not reentrant, caller is
* responsible for proper locking.
*/
void wine_mmap_add_reserved_area( void *addr, size_t size )
{
struct reserved_area *area;
struct list *ptr;
if (!((char *)addr + size)) size--; /* avoid wrap-around */
/* blow away existing mappings */
wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE | MAP_FIXED );
LIST_FOR_EACH( ptr, &reserved_areas )
{
area = LIST_ENTRY( ptr, struct reserved_area, entry );
if (area->base > addr)
{
/* try to merge with the next one */
if ((char *)addr + size == (char *)area->base)
{
area->base = addr;
area->size += size;
return;
}
break;
}
else if ((char *)area->base + area->size == (char *)addr)
{
/* merge with the previous one */
area->size += size;
/* try to merge with the next one too */
if ((ptr = list_next( &reserved_areas, ptr )))
{
struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
if ((char *)addr + size == (char *)next->base)
{
area->size += next->size;
list_remove( &next->entry );
free( next );
}
}
return;
}
}
if ((area = malloc( sizeof(*area) )))
{
area->base = addr;
area->size = size;
list_add_before( ptr, &area->entry );
}
}
/***********************************************************************
* wine_mmap_remove_reserved_area
*
* Remove an address range from the list of reserved areas.
* If 'unmap' is non-zero the range is unmapped too.
*
* Note: the reserved areas functions are not reentrant, caller is
* responsible for proper locking.
*/
void wine_mmap_remove_reserved_area( void *addr, size_t size, int unmap )
{
struct reserved_area *area;
struct list *ptr;
if (!((char *)addr + size)) size--; /* avoid wrap-around */
ptr = list_head( &reserved_areas );
/* find the first area covering address */
while (ptr)
{
area = LIST_ENTRY( ptr, struct reserved_area, entry );
if ((char *)area->base >= (char *)addr + size) break; /* outside the range */
if ((char *)area->base + area->size > (char *)addr) /* overlaps range */
{
if (area->base >= addr)
{
if ((char *)area->base + area->size > (char *)addr + size)
{
/* range overlaps beginning of area only -> shrink area */
if (unmap) munmap( area->base, (char *)addr + size - (char *)area->base );
area->size -= (char *)addr + size - (char *)area->base;
area->base = (char *)addr + size;
break;
}
else
{
/* range contains the whole area -> remove area completely */
ptr = list_next( &reserved_areas, ptr );
if (unmap) munmap( area->base, area->size );
list_remove( &area->entry );
free( area );
continue;
}
}
else
{
if ((char *)area->base + area->size > (char *)addr + size)
{
/* range is in the middle of area -> split area in two */
struct reserved_area *new_area = malloc( sizeof(*new_area) );
if (new_area)
{
new_area->base = (char *)addr + size;
new_area->size = (char *)area->base + area->size - (char *)new_area->base;
list_add_after( ptr, &new_area->entry );
}
else size = (char *)area->base + area->size - (char *)addr;
area->size = (char *)addr - (char *)area->base;
if (unmap) munmap( addr, size );
break;
}
else
{
/* range overlaps end of area only -> shrink area */
if (unmap) munmap( addr, (char *)area->base + area->size - (char *)addr );
area->size = (char *)addr - (char *)area->base;
}
}
}
ptr = list_next( &reserved_areas, ptr );
}
}
/***********************************************************************
* wine_mmap_is_in_reserved_area
*
* Check if the specified range is included in a reserved area.
* Returns 1 if range is fully included, 0 if range is not included
* at all, and -1 if it is only partially included.
*
* Note: the reserved areas functions are not reentrant, caller is
* responsible for proper locking.
*/
int wine_mmap_is_in_reserved_area( void *addr, size_t size )
{
struct reserved_area *area;
struct list *ptr;
LIST_FOR_EACH( ptr, &reserved_areas )
{
area = LIST_ENTRY( ptr, struct reserved_area, entry );
if (area->base > addr) break;
if ((char *)area->base + area->size <= (char *)addr) continue;
/* area must contain block completely */
if ((char *)area->base + area->size < (char *)addr + size) return -1;
return 1;
}
return 0;
}
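For reference, the ntdll hunks earlier in this commit consume the tri-state return value roughly as follows (a condensed, hypothetical sketch of the map_view() logic above; status handling omitted):

#include <sys/mman.h>
#include "wine/library.h"

/* condensed sketch, not the actual ntdll code */
static void *claim_range( void *base, size_t size, int unix_prot )
{
    switch (wine_mmap_is_in_reserved_area( base, size ))
    {
    case -1:  /* partially in a reserved area: the requested address is unusable */
        return NULL;
    case 0:   /* not reserved: try a normal, non-fixed anonymous mapping */
        return wine_anon_mmap( base, size, unix_prot, 0 );
    default:  /* fully reserved: the placeholder can safely be replaced */
        return wine_anon_mmap( base, size, unix_prot, MAP_FIXED );
    }
}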


@@ -21,21 +21,9 @@
#include "config.h"
#include "wine/port.h"
#include <assert.h>
#include <ctype.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_STDINT_H
# include <stdint.h>
#endif
#include "wine/library.h"
#include "wine/pthread.h"
@@ -149,173 +137,3 @@ __ASM_GLOBAL_FUNC( wine_switch_to_stack,
#else
#error You must implement wine_switch_to_stack for your platform
#endif
static char *pe_area;
static size_t pe_area_size;
/***********************************************************************
* wine_set_pe_load_area
*
* Define the reserved area to use for loading the main PE binary.
*/
void wine_set_pe_load_area( void *base, size_t size )
{
unsigned int page_mask = getpagesize() - 1;
char *end = (char *)base + size;
pe_area = (char *)(((unsigned long)base + page_mask) & ~page_mask);
pe_area_size = (end - pe_area) & ~page_mask;
}
/***********************************************************************
* wine_free_pe_load_area
*
* Free the reserved area to use for loading the main PE binary.
*/
void wine_free_pe_load_area(void)
{
#ifdef HAVE_MMAP
if (pe_area) munmap( pe_area, pe_area_size );
#endif
pe_area = NULL;
}
#if (defined(__svr4__) || defined(__NetBSD__)) && !defined(MAP_TRYFIXED)
/***********************************************************************
* try_mmap_fixed
*
* The purpose of this routine is to emulate the behaviour of
* the Linux mmap() routine if a non-NULL address is passed,
* but the MAP_FIXED flag is not set. Linux in this case tries
* to place the mapping at the specified address, *unless* the
* range is already in use. Solaris, however, completely ignores
* the address argument in this case.
*
* As Wine code occasionally relies on the Linux behaviour, e.g. to
* be able to map non-relocateable PE executables to their proper
* start addresses, or to map the DOS memory to 0, this routine
* emulates the Linux behaviour by checking whether the desired
* address range is still available, and placing the mapping there
* using MAP_FIXED if so.
*/
static int try_mmap_fixed (void *addr, size_t len, int prot, int flags,
int fildes, off_t off)
{
char * volatile result = NULL;
int pagesize = getpagesize();
pid_t pid;
/* We only try to map to a fixed address if
addr is non-NULL and properly aligned,
and MAP_FIXED isn't already specified. */
if ( !addr )
return 0;
if ( (uintptr_t)addr & (pagesize-1) )
return 0;
if ( flags & MAP_FIXED )
return 0;
/* We use vfork() to freeze all threads of the
current process. This allows us to check without
race condition whether the desired memory range is
already in use. Note that because vfork() shares
the address spaces between parent and child, we
can actually perform the mapping in the child. */
if ( (pid = vfork()) == -1 )
{
perror("try_mmap_fixed: vfork");
exit(1);
}
if ( pid == 0 )
{
int i;
char vec;
/* We call mincore() for every page in the desired range.
If any of these calls succeeds, the page is already
mapped and we must fail. */
for ( i = 0; i < len; i += pagesize )
if ( mincore( (caddr_t)addr + i, pagesize, &vec ) != -1 )
_exit(1);
/* Perform the mapping with MAP_FIXED set. This is safe
now, as none of the pages is currently in use. */
result = mmap( addr, len, prot, flags | MAP_FIXED, fildes, off );
if ( result == addr )
_exit(0);
if ( result != (void *) -1 ) /* This should never happen ... */
munmap( result, len );
_exit(1);
}
/* vfork() lets the parent continue only after the child
has exited. Furthermore, Wine sets SIGCHLD to SIG_IGN,
so we don't need to wait for the child. */
return result == addr;
}
#endif /* (__svr4__ || __NetBSD__) && !MAP_TRYFIXED */
/***********************************************************************
* wine_anon_mmap
*
* Portable wrapper for anonymous mmaps
*/
void *wine_anon_mmap( void *start, size_t size, int prot, int flags )
{
#ifdef HAVE_MMAP
static int fdzero = -1;
#ifdef MAP_ANON
flags |= MAP_ANON;
#else
if (fdzero == -1)
{
if ((fdzero = open( "/dev/zero", O_RDONLY )) == -1)
{
perror( "/dev/zero: open" );
exit(1);
}
}
#endif /* MAP_ANON */
#ifdef MAP_SHARED
flags &= ~MAP_SHARED;
#endif
/* Linux EINVAL's on us if we don't pass MAP_PRIVATE to an anon mmap */
#ifdef MAP_PRIVATE
flags |= MAP_PRIVATE;
#endif
if (pe_area && start &&
(char *)start >= pe_area &&
(char *)start + size <= pe_area + pe_area_size)
{
wine_free_pe_load_area();
flags |= MAP_FIXED;
}
if (!(flags & MAP_FIXED))
{
#ifdef MAP_TRYFIXED
/* If available, this will attempt a fixed mapping in-kernel */
flags |= MAP_TRYFIXED;
#elif defined(__svr4__) || defined(__NetBSD__)
if ( try_mmap_fixed( start, size, prot, flags, fdzero, 0 ) )
return start;
#endif
}
return mmap( start, size, prot, flags, fdzero, 0 );
#else
return (void *)-1;
#endif
}


@@ -32,7 +32,6 @@ EXPORTS
wine_dlopen
wine_dlsym
wine_exec_wine_binary
wine_free_pe_load_area
wine_get_config_dir
wine_get_cs
wine_get_ds
@@ -56,6 +55,9 @@ EXPORTS
wine_ldt_is_system
wine_ldt_realloc_entries
wine_ldt_set_entry
wine_mmap_add_reserved_area
wine_mmap_is_in_reserved_area
wine_mmap_remove_reserved_area
wine_pthread_abort_thread
wine_pthread_create_thread
wine_pthread_exit_thread
@@ -64,5 +66,4 @@ EXPORTS
wine_pthread_init_thread
wine_set_fs
wine_set_gs
wine_set_pe_load_area
wine_switch_to_stack


@@ -32,7 +32,6 @@ WINE_1.0
wine_dlopen;
wine_dlsym;
wine_exec_wine_binary;
wine_free_pe_load_area;
wine_get_config_dir;
wine_get_cs;
wine_get_ds;
@@ -56,6 +55,9 @@ WINE_1.0
wine_ldt_is_system;
wine_ldt_realloc_entries;
wine_ldt_set_entry;
wine_mmap_add_reserved_area;
wine_mmap_is_in_reserved_area;
wine_mmap_remove_reserved_area;
wine_pthread_abort_thread;
wine_pthread_create_thread;
wine_pthread_exit_thread;
@@ -64,7 +66,6 @@ WINE_1.0
wine_pthread_init_thread;
wine_set_fs;
wine_set_gs;
wine_set_pe_load_area;
wine_switch_to_stack;
local: *;


@@ -29,11 +29,6 @@ int main( int argc, char *argv[] )
{
char error[1024];
#if 0
static char pe_load[256*1024*1024] __attribute__((aligned(4096)));
wine_set_pe_load_area( pe_load, sizeof(pe_load) );
#endif
wine_init( argc, argv, error, sizeof(error) );
fprintf( stderr, "wine: failed to initialize: %s\n", error );
exit(1);