/*
 * Unix interface for virtual memory functions
 *
 * Copyright (C) 2020 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#if 0
#pragma makedep unix
#endif

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#ifdef HAVE_DLFCN_H
# include <dlfcn.h>   /* dlsym/RTLD_DEFAULT, used in virtual_init below */
#endif
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>   /* malloc/free for the reserved area list */
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif
#if defined(__APPLE__)
# include <mach/mach_init.h>
# include <mach/mach_vm.h>
#endif

#include <stdio.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#define NONAMELESSUNION
#define NONAMELESSSTRUCT
#include "windef.h"
#include "winnt.h"
#include "winternl.h"
#include "unix_private.h"
#include "wine/list.h"

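/* ranges already reserved by the preloader, exported through the
 * wine_main_preload_info symbol; the array ends with a zero-size entry */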
struct preload_info
{
    void  *addr;
    size_t size;
};

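/* a range of address space set aside for Win32 use; the list below is
 * kept sorted by base address and adjacent entries are merged */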
struct reserved_area
{
    struct list entry;
    void  *base;
    size_t size;
};

static struct list reserved_areas = LIST_INIT(reserved_areas);

static const UINT page_shift = 12;
static const UINT_PTR page_mask = 0xfff;
static const unsigned int granularity_mask = 0xffff;  /* reserved areas have 64k granularity */

SIZE_T signal_stack_size = 0;
SIZE_T signal_stack_mask = 0;
static SIZE_T signal_stack_align;

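/* ROUND_ADDR truncates an address down to the given alignment mask;
 * ROUND_SIZE grows an (addr,size) range to whole pages: with 4k pages,
 * ROUND_SIZE( 0x1234, 0x2000 ) is 0x3000, since the range
 * [0x1234,0x3234) touches three pages */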
#define ROUND_ADDR(addr,mask) \
   ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))

#define ROUND_SIZE(addr,size) \
   (((SIZE_T)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)

#ifndef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif
#ifndef MAP_TRYFIXED
#define MAP_TRYFIXED 0
#endif


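/***********************************************************************
 *           reserve_area
 *
 * Reserve as much as possible of the address range [addr, end) with
 * PROT_NONE mappings and record what was obtained in the reserved
 * areas list.
 */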
static void reserve_area( void *addr, void *end )
{
#ifdef __APPLE__

#ifdef __i386__
    static const mach_vm_address_t max_address = VM_MAX_ADDRESS;
#else
    static const mach_vm_address_t max_address = MACH_VM_MAX_ADDRESS;
#endif
    mach_vm_address_t address = (mach_vm_address_t)addr;
    mach_vm_address_t end_address = (mach_vm_address_t)end;

    if (!end_address || max_address < end_address)
        end_address = max_address;

    while (address < end_address)
    {
        mach_vm_address_t hole_address = address;
        kern_return_t ret;
        mach_vm_size_t size;
        vm_region_basic_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
        mach_port_t dummy_object_name = MACH_PORT_NULL;

        /* find the mapped region at or above the current address. */
        ret = mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64,
                             (vm_region_info_t)&info, &count, &dummy_object_name);
        if (ret != KERN_SUCCESS)
        {
            address = max_address;
            size = 0;
        }

        if (end_address < address)
            address = end_address;
        if (hole_address < address)
        {
            /* found a hole, attempt to reserve it. */
            size_t hole_size = address - hole_address;
            mach_vm_address_t alloc_address = hole_address;

            ret = mach_vm_map( mach_task_self(), &alloc_address, hole_size, 0, VM_FLAGS_FIXED,
                               MEMORY_OBJECT_NULL, 0, 0, PROT_NONE, VM_PROT_ALL, VM_INHERIT_COPY );
            if (!ret) mmap_add_reserved_area( (void*)hole_address, hole_size );
            else if (ret == KERN_NO_SPACE)
            {
                /* something filled (part of) the hole before we could.
                   go back and look again. */
                address = hole_address;
                continue;
            }
        }
        address += size;
    }
#else
    void *ptr;
    int flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_TRYFIXED;
    size_t size = (char *)end - (char *)addr;

    if (!size) return;

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ptr = mmap( addr, size, PROT_NONE, flags | MAP_FIXED | MAP_EXCL, -1, 0 );
#else
    ptr = mmap( addr, size, PROT_NONE, flags, -1, 0 );
#endif
    if (ptr == addr)
    {
        mmap_add_reserved_area( addr, size );
        return;
    }
    if (ptr != (void *)-1) munmap( ptr, size );

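    /* mmap() did not give us the requested address: split the range in
     * half (rounded down to 64k granularity) and retry each half */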
    size = (size / 2) & ~granularity_mask;
    if (size)
    {
        reserve_area( addr, (char *)addr + size );
        reserve_area( (char *)addr + size, end );
    }
#endif /* __APPLE__ */
}


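/***********************************************************************
 *           mmap_init
 *
 * Reserve the address ranges that Win32 code expects to be available,
 * when the preloader has not already done so.
 */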
static void mmap_init( const struct preload_info *preload_info )
{
#ifdef __i386__
#ifndef __APPLE__
    char stack;
    char * const stack_ptr = &stack;
#endif
    char *user_space_limit = (char *)0x7ffe0000;
    int i;

    if (preload_info)
    {
        /* check for a reserved area starting at the user space limit */
        /* to avoid wasting time trying to allocate it again */
        for (i = 0; preload_info[i].size; i++)
        {
            if ((char *)preload_info[i].addr > user_space_limit) break;
            if ((char *)preload_info[i].addr + preload_info[i].size > user_space_limit)
            {
                user_space_limit = (char *)preload_info[i].addr + preload_info[i].size;
                break;
            }
        }
    }
    else reserve_area( (void *)0x00010000, (void *)0x40000000 );


#ifndef __APPLE__
    if (stack_ptr >= user_space_limit)
    {
        char *end = 0;
        char *base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) - (granularity_mask + 1);
        if (base > user_space_limit) reserve_area( user_space_limit, base );
        base = stack_ptr - ((unsigned int)stack_ptr & granularity_mask) + (granularity_mask + 1);
#if defined(linux) || defined(__FreeBSD__) || defined (__FreeBSD_kernel__) || defined(__DragonFly__)
        /* Heuristic: assume the stack is near the end of the address */
        /* space, this avoids a lot of futile allocation attempts */
        end = (char *)(((unsigned long)base + 0x0fffffff) & 0xf0000000);
#endif
        reserve_area( base, end );
    }
    else
#endif
        reserve_area( user_space_limit, 0 );

#elif defined(__x86_64__) || defined(__aarch64__)

    if (preload_info) return;
    /* if we don't have a preloader, try to reserve the space now */
    reserve_area( (void *)0x000000010000, (void *)0x000068000000 );
    reserve_area( (void *)0x00007ff00000, (void *)0x00007fff0000 );
    reserve_area( (void *)0x7ffffe000000, (void *)0x7fffffff0000 );

#endif
}


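/***********************************************************************
 *           mmap_add_reserved_area
 *
 * Add an address range to the list of reserved areas, merging it with
 * adjacent entries. The list is kept sorted by base address.
 */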
void CDECL mmap_add_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr)
        {
            /* try to merge with the next one */
            if ((char *)addr + size == (char *)area->base)
            {
                area->base = addr;
                area->size += size;
                return;
            }
            break;
        }
        else if ((char *)area->base + area->size == (char *)addr)
        {
            /* merge with the previous one */
            area->size += size;

            /* try to merge with the next one too */
            if ((ptr = list_next( &reserved_areas, ptr )))
            {
                struct reserved_area *next = LIST_ENTRY( ptr, struct reserved_area, entry );
                if ((char *)addr + size == (char *)next->base)
                {
                    area->size += next->size;
                    list_remove( &next->entry );
                    free( next );
                }
            }
            return;
        }
    }

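    /* not mergeable: insert a new entry before ptr (which is the list
     * head itself when the new range goes at the end of the list) */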
    if ((area = malloc( sizeof(*area) )))
    {
        area->base = addr;
        area->size = size;
        list_add_before( ptr, &area->entry );
    }
}

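/***********************************************************************
 *           mmap_remove_reserved_area
 *
 * Remove an address range from the list of reserved areas, shrinking,
 * splitting or deleting the entries that overlap it.
 */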
void CDECL mmap_remove_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    if (!((char *)addr + size)) size--;  /* avoid wrap-around */

    ptr = list_head( &reserved_areas );
    /* find the first area covering address */
    while (ptr)
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if ((char *)area->base >= (char *)addr + size) break;  /* outside the range */
        if ((char *)area->base + area->size > (char *)addr)  /* overlaps range */
        {
            if (area->base >= addr)
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range overlaps beginning of area only -> shrink area */
                    area->size -= (char *)addr + size - (char *)area->base;
                    area->base = (char *)addr + size;
                    break;
                }
                else
                {
                    /* range contains the whole area -> remove area completely */
                    ptr = list_next( &reserved_areas, ptr );
                    list_remove( &area->entry );
                    free( area );
                    continue;
                }
            }
            else
            {
                if ((char *)area->base + area->size > (char *)addr + size)
                {
                    /* range is in the middle of area -> split area in two */
                    struct reserved_area *new_area = malloc( sizeof(*new_area) );
                    if (new_area)
                    {
                        new_area->base = (char *)addr + size;
                        new_area->size = (char *)area->base + area->size - (char *)new_area->base;
                        list_add_after( ptr, &new_area->entry );
                    }
                    else size = (char *)area->base + area->size - (char *)addr;
                    area->size = (char *)addr - (char *)area->base;
                    break;
                }
                else
                {
                    /* range overlaps end of area only -> shrink area */
                    area->size = (char *)addr - (char *)area->base;
                }
            }
        }
        ptr = list_next( &reserved_areas, ptr );
    }
}

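/***********************************************************************
 *           mmap_is_in_reserved_area
 *
 * Check whether the given range lies within a reserved area.
 * Returns 1 if it is fully contained, -1 if it starts inside an area
 * but extends past its end, and 0 if it does not start in any area.
 */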
int CDECL mmap_is_in_reserved_area( void *addr, SIZE_T size )
{
    struct reserved_area *area;
    struct list *ptr;

    LIST_FOR_EACH( ptr, &reserved_areas )
    {
        area = LIST_ENTRY( ptr, struct reserved_area, entry );
        if (area->base > addr) break;
        if ((char *)area->base + area->size <= (char *)addr) continue;
        /* area must contain block completely */
        if ((char *)area->base + area->size < (char *)addr + size) return -1;
        return 1;
    }
    return 0;
}

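/***********************************************************************
 *           mmap_enum_reserved_areas
 *
 * Call enum_func for each reserved area until it returns non-zero;
 * that value is then returned. With top_down set, the areas are
 * visited from the highest base address down.
 */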
int CDECL mmap_enum_reserved_areas( int (CDECL *enum_func)(void *base, SIZE_T size, void *arg),
                                    void *arg, int top_down )
{
    int ret = 0;
    struct list *ptr;

    if (top_down)
    {
        for (ptr = reserved_areas.prev; ptr != &reserved_areas; ptr = ptr->prev)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    else
    {
        for (ptr = reserved_areas.next; ptr != &reserved_areas; ptr = ptr->next)
        {
            struct reserved_area *area = LIST_ENTRY( ptr, struct reserved_area, entry );
            if ((ret = enum_func( area->base, area->size, arg ))) break;
        }
    }
    return ret;
}
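

#if 0
/* Illustrative sketch only, not part of the original source: a callback
 * that sums the sizes of all reserved areas through the enumerator above.
 * Returning 0 from the callback keeps the enumeration going; the names
 * sum_reserved and total_reserved_size are hypothetical. */
static int CDECL sum_reserved( void *base, SIZE_T size, void *arg )
{
    *(SIZE_T *)arg += size;  /* accumulate into the caller-provided total */
    return 0;                /* non-zero would stop the walk */
}

static SIZE_T total_reserved_size(void)
{
    SIZE_T total = 0;
    mmap_enum_reserved_areas( sum_reserved, &total, 0 );  /* bottom-up walk */
    return total;
}
#endif
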
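/***********************************************************************
 *           virtual_init
 *
 * Initialize the virtual memory bookkeeping: register the ranges
 * reserved by the preloader and compute the signal stack layout.
 */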
void virtual_init(void)
{
    const struct preload_info **preload_info = dlsym( RTLD_DEFAULT, "wine_main_preload_info" );
    size_t size;
    int i;

    if (preload_info && *preload_info)
        for (i = 0; (*preload_info)[i].size; i++)
            mmap_add_reserved_area( (*preload_info)[i].addr, (*preload_info)[i].size );

    mmap_init( preload_info ? *preload_info : NULL );

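    /* reserve room for the TEB plus the signal stack, rounded up to a
     * power of two: assuming for illustration that ROUND_SIZE( 0, sizeof(TEB) )
     * is 0x2000 and MINSIGSTKSZ <= 8192, size = 0x4000, giving
     * signal_stack_align = 14, signal_stack_mask = 0x3fff and
     * signal_stack_size = 0x2000 */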
    size = ROUND_SIZE( 0, sizeof(TEB) ) + max( MINSIGSTKSZ, 8192 );
    /* find the first power of two not smaller than size */
    signal_stack_align = page_shift;
    while ((1u << signal_stack_align) < size) signal_stack_align++;
    signal_stack_mask = (1 << signal_stack_align) - 1;
    signal_stack_size = (1 << signal_stack_align) - ROUND_SIZE( 0, sizeof(TEB) );
}