2002-09-17 20:54:42 +02:00
|
|
|
/*
|
|
|
|
* Win32 virtual memory functions
|
|
|
|
*
|
|
|
|
* Copyright 1997, 2002 Alexandre Julliard
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
|
|
* License along with this library; if not, write to the Free Software
|
2006-05-18 14:49:52 +02:00
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
|
2002-09-17 20:54:42 +02:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "config.h"
|
|
|
|
#include "wine/port.h"
|
|
|
|
|
|
|
|
#include <assert.h>
|
|
|
|
#include <errno.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#ifdef HAVE_UNISTD_H
|
|
|
|
# include <unistd.h>
|
|
|
|
#endif
|
2005-06-06 22:13:08 +02:00
|
|
|
#include <stdarg.h>
|
2002-09-17 20:54:42 +02:00
|
|
|
#include <stdlib.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <string.h>
|
2020-04-28 13:26:34 +02:00
|
|
|
#include <signal.h>
|
2002-09-17 20:54:42 +02:00
|
|
|
#include <sys/types.h>
|
2017-10-03 13:36:40 +02:00
|
|
|
#ifdef HAVE_SYS_SOCKET_H
|
|
|
|
# include <sys/socket.h>
|
|
|
|
#endif
|
2006-02-07 21:17:45 +01:00
|
|
|
#ifdef HAVE_SYS_STAT_H
|
|
|
|
# include <sys/stat.h>
|
|
|
|
#endif
|
2002-09-17 20:54:42 +02:00
|
|
|
#ifdef HAVE_SYS_MMAN_H
|
2006-02-07 21:17:45 +01:00
|
|
|
# include <sys/mman.h>
|
2002-09-17 20:54:42 +02:00
|
|
|
#endif
|
2016-07-08 05:40:22 +02:00
|
|
|
#ifdef HAVE_SYS_SYSINFO_H
|
|
|
|
# include <sys/sysinfo.h>
|
|
|
|
#endif
|
2008-07-24 19:21:33 +02:00
|
|
|
#ifdef HAVE_VALGRIND_VALGRIND_H
|
|
|
|
# include <valgrind/valgrind.h>
|
2008-04-01 14:11:44 +02:00
|
|
|
#endif
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2003-09-06 01:08:26 +02:00
|
|
|
#include "ntstatus.h"
|
2005-11-28 17:32:54 +01:00
|
|
|
#define WIN32_NO_STATUS
|
2015-03-14 03:38:17 +01:00
|
|
|
#define NONAMELESSUNION
|
2005-06-06 22:13:08 +02:00
|
|
|
#include "windef.h"
|
2002-09-17 20:54:42 +02:00
|
|
|
#include "winternl.h"
|
|
|
|
#include "wine/library.h"
|
|
|
|
#include "wine/server.h"
|
2009-01-14 20:17:52 +01:00
|
|
|
#include "wine/exception.h"
|
2017-09-07 19:31:42 +02:00
|
|
|
#include "wine/rbtree.h"
|
2002-09-17 20:54:42 +02:00
|
|
|
#include "wine/debug.h"
|
2003-10-08 21:11:08 +02:00
|
|
|
#include "ntdll_misc.h"
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
WINE_DEFAULT_DEBUG_CHANNEL(virtual);
|
|
|
|
WINE_DECLARE_DEBUG_CHANNEL(module);
|
|
|
|
|
2004-05-25 03:29:24 +02:00
|
|
|
#ifndef MAP_NORESERVE
|
|
|
|
#define MAP_NORESERVE 0
|
|
|
|
#endif
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/* File view: one entry per contiguous virtual memory mapping we manage. */
struct file_view
{
    struct wine_rb_entry entry;  /* entry in global view tree (keyed by base address) */
    void         *base;          /* base address */
    size_t        size;          /* size in bytes */
    unsigned int  protect;       /* protection for all pages at allocation time and SEC_* flags */
};
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2017-09-12 11:42:04 +02:00
|
|
|
/* per-page protection flags */
|
|
|
|
#define VPROT_READ 0x01
|
|
|
|
#define VPROT_WRITE 0x02
|
|
|
|
#define VPROT_EXEC 0x04
|
|
|
|
#define VPROT_WRITECOPY 0x08
|
|
|
|
#define VPROT_GUARD 0x10
|
|
|
|
#define VPROT_COMMITTED 0x20
|
|
|
|
#define VPROT_WRITEWATCH 0x40
|
|
|
|
/* per-mapping protection flags */
|
|
|
|
#define VPROT_SYSTEM 0x0200 /* system view (underlying mmap not under our control) */
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
/* Conversion from VPROT_* to Win32 flags, indexed by the low four
 * per-page protection bits (READ/WRITE/EXEC/WRITECOPY). */
static const BYTE VIRTUAL_Win32Flags[16] =
{
    PAGE_NOACCESS,              /* 0 */
    PAGE_READONLY,              /* READ */
    PAGE_READWRITE,             /* WRITE */
    PAGE_READWRITE,             /* READ | WRITE */
    PAGE_EXECUTE,               /* EXEC */
    PAGE_EXECUTE_READ,          /* READ | EXEC */
    PAGE_EXECUTE_READWRITE,     /* WRITE | EXEC */
    PAGE_EXECUTE_READWRITE,     /* READ | WRITE | EXEC */
    PAGE_WRITECOPY,             /* WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITECOPY */
    PAGE_WRITECOPY,             /* WRITE | WRITECOPY */
    PAGE_WRITECOPY,             /* READ | WRITE | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* READ | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY,     /* WRITE | EXEC | WRITECOPY */
    PAGE_EXECUTE_WRITECOPY      /* READ | WRITE | EXEC | WRITECOPY */
};
|
|
|
|
|
2017-09-07 19:31:42 +02:00
|
|
|
static struct wine_rb_tree views_tree;
|
2003-08-13 01:50:54 +02:00
|
|
|
|
2005-06-25 20:00:57 +02:00
|
|
|
static RTL_CRITICAL_SECTION csVirtual;
|
|
|
|
static RTL_CRITICAL_SECTION_DEBUG critsect_debug =
|
2003-08-13 01:50:54 +02:00
|
|
|
{
|
|
|
|
0, 0, &csVirtual,
|
|
|
|
{ &critsect_debug.ProcessLocksList, &critsect_debug.ProcessLocksList },
|
2005-09-09 12:19:44 +02:00
|
|
|
0, 0, { (DWORD_PTR)(__FILE__ ": csVirtual") }
|
2003-08-13 01:50:54 +02:00
|
|
|
};
|
2005-06-25 20:00:57 +02:00
|
|
|
static RTL_CRITICAL_SECTION csVirtual = { &critsect_debug, -1, 0, 0, 0, 0 };
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
#ifdef __i386__
|
2013-01-08 14:00:06 +01:00
|
|
|
static const UINT page_shift = 12;
|
|
|
|
static const UINT_PTR page_mask = 0xfff;
|
2004-05-25 03:29:24 +02:00
|
|
|
/* Note: these are Windows limits, you cannot change them. */
|
2008-10-28 13:51:36 +01:00
|
|
|
static void *address_space_limit = (void *)0xc0000000; /* top of the total available address space */
|
|
|
|
static void *user_space_limit = (void *)0x7fff0000; /* top of the user address space */
|
2008-11-06 11:42:58 +01:00
|
|
|
static void *working_set_limit = (void *)0x7fff0000; /* top of the current working set */
|
2010-12-16 10:25:14 +01:00
|
|
|
static void *address_space_start = (void *)0x110000; /* keep DOS area clear */
|
2009-08-27 19:46:45 +02:00
|
|
|
#elif defined(__x86_64__)
|
2013-01-08 14:00:06 +01:00
|
|
|
static const UINT page_shift = 12;
|
|
|
|
static const UINT_PTR page_mask = 0xfff;
|
2009-08-27 19:46:45 +02:00
|
|
|
static void *address_space_limit = (void *)0x7fffffff0000;
|
|
|
|
static void *user_space_limit = (void *)0x7fffffff0000;
|
|
|
|
static void *working_set_limit = (void *)0x7fffffff0000;
|
2010-12-16 10:25:14 +01:00
|
|
|
static void *address_space_start = (void *)0x10000;
|
2019-10-28 21:07:53 +01:00
|
|
|
#elif defined(__arm__)
|
|
|
|
static const UINT page_shift = 12;
|
|
|
|
static const UINT_PTR page_mask = 0xfff;
|
|
|
|
static void *address_space_limit = (void *)0xc0000000;
|
|
|
|
static void *user_space_limit = (void *)0x7fff0000;
|
|
|
|
static void *working_set_limit = (void *)0x7fff0000;
|
|
|
|
static void *address_space_start = (void *)0x10000;
|
|
|
|
#elif defined(__aarch64__)
|
|
|
|
static const UINT page_shift = 12;
|
|
|
|
static const UINT_PTR page_mask = 0xfff;
|
|
|
|
static void *address_space_limit = (void *)0xffffffff0000;
|
|
|
|
static void *user_space_limit = (void *)0x7fffffff0000;
|
|
|
|
static void *working_set_limit = (void *)0x7fffffff0000;
|
|
|
|
static void *address_space_start = (void *)0x10000;
|
2002-09-17 20:54:42 +02:00
|
|
|
#else
|
2013-01-08 14:00:06 +01:00
|
|
|
UINT_PTR page_size = 0;
|
2002-09-17 20:54:42 +02:00
|
|
|
static UINT page_shift;
|
2005-09-16 20:54:19 +02:00
|
|
|
static UINT_PTR page_mask;
|
2008-11-07 11:04:07 +01:00
|
|
|
static void *address_space_limit;
|
|
|
|
static void *user_space_limit;
|
|
|
|
static void *working_set_limit;
|
2010-12-16 10:25:14 +01:00
|
|
|
static void *address_space_start = (void *)0x10000;
|
2002-09-17 20:54:42 +02:00
|
|
|
#endif /* __i386__ */
|
2013-11-02 14:34:13 +01:00
|
|
|
static const BOOL is_win64 = (sizeof(void *) > sizeof(int));
|
2020-04-30 10:07:36 +02:00
|
|
|
static const UINT_PTR granularity_mask = 0xffff;
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2020-04-28 13:26:34 +02:00
|
|
|
SIZE_T signal_stack_size = 0;
|
|
|
|
SIZE_T signal_stack_mask = 0;
|
2020-04-28 13:34:57 +02:00
|
|
|
static SIZE_T signal_stack_align;
|
2020-04-28 13:26:34 +02:00
|
|
|
|
2020-04-30 10:05:56 +02:00
|
|
|
/* TEB allocation blocks */
|
|
|
|
static TEB *teb_block;
|
|
|
|
static TEB *next_free_teb;
|
|
|
|
static int teb_block_pos;
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
#define ROUND_ADDR(addr,mask) \
|
2005-09-16 20:54:19 +02:00
|
|
|
((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
#define ROUND_SIZE(addr,size) \
|
2008-11-14 17:40:54 +01:00
|
|
|
(((SIZE_T)(size) + ((UINT_PTR)(addr) & page_mask) + page_mask) & ~page_mask)
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
#define VIRTUAL_DEBUG_DUMP_VIEW(view) \
|
2005-09-26 11:57:38 +02:00
|
|
|
do { if (TRACE_ON(virtual)) VIRTUAL_DumpView(view); } while (0)
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2017-09-05 16:17:39 +02:00
|
|
|
#ifdef _WIN64 /* on 64-bit the page protection bytes use a 2-level table */
|
|
|
|
static const size_t pages_vprot_shift = 20;
|
|
|
|
static const size_t pages_vprot_mask = (1 << 20) - 1;
|
|
|
|
static size_t pages_vprot_size;
|
|
|
|
static BYTE **pages_vprot;
|
|
|
|
#else /* on 32-bit we use a simple array with one byte per page */
|
|
|
|
static BYTE *pages_vprot;
|
|
|
|
#endif
|
|
|
|
|
2017-09-06 09:58:18 +02:00
|
|
|
static struct file_view *view_block_start, *view_block_end, *next_free_view;
|
|
|
|
static const size_t view_block_size = 0x100000;
|
2006-07-24 14:00:19 +02:00
|
|
|
static void *preload_reserve_start;
|
|
|
|
static void *preload_reserve_end;
|
2013-11-02 14:34:13 +01:00
|
|
|
static BOOL use_locks;
|
|
|
|
static BOOL force_exec_prot; /* whether to force PROT_EXEC on all PROT_READ mmaps */
|
2004-06-18 02:26:57 +02:00
|
|
|
|
ntdll: Introduce free_ranges indexing sequence.
This is an ordered range sequence used to keep track of free address
ranges.
The sequence contains an entry for every free address range, with base
pointing to the first free address and end pointing to the next first
used address. It is initialized to [0, ~0] for convenience, so that
there's always a range before or after a view.
In the worst case scenario, where memory is entirely fragmented, there's
going to be one more range than allocated views, but in general there's
much less. In any case, because of cache locality, iterating in the
contiguous sequence is much faster than traversing the view rbtree.
In theory there can be a performance hit when allocating or deleting a
view, as we may have to move the end of the sequence when a range is
split or merged. But in practice and given the usually low number of
ranges, this is not an issue.
The default and maximum sequence size can hold up to 65536 ranges, which
is much more than enough in general, and performance is probably going
to be bad before reaching the limit anyway. The code currently asserts
when reaching the limit, although we could possibly grow the sequence.
Signed-off-by: Rémi Bernon <rbernon@codeweavers.com>
Signed-off-by: Alexandre Julliard <julliard@winehq.org>
2020-05-29 20:11:18 +02:00
|
|
|
/* One free address range: [base, end) — base is the first free address,
 * end is the next used address. */
struct range_entry
{
    void *base;
    void *end;
};

static struct range_entry *free_ranges;
static struct range_entry *free_ranges_end;


/***********************************************************************
 *           free_ranges_lower_bound
 *
 * Returns the first range whose end is not less than addr, or end if there's none.
 */
static struct range_entry *free_ranges_lower_bound( void *addr )
{
    struct range_entry *first = free_ranges;
    size_t count = free_ranges_end - free_ranges;

    /* classic lower-bound binary search over the sorted range sequence */
    while (count)
    {
        size_t step = count / 2;
        struct range_entry *probe = first + step;

        if (probe->end < addr)
        {
            first = probe + 1;
            count -= step + 1;
        }
        else count = step;
    }

    return first;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           free_ranges_insert_view
 *
 * Updates the free_ranges after a new view has been created.
 * The view is rounded out to granularity boundaries; the free range that
 * contains it is split, shrunk, or removed accordingly.
 */
static void free_ranges_insert_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this happens because virtual_alloc_thread_stack shrinks a view, then creates another one on top,
     * or because AT_ROUND_TO_PAGE was used with NtMapViewOfSection to force 4kB aligned mapping. */
    if ((range->end > view_base && range->base >= view_end) ||
        (range->end == view_base && next->base >= view_end))
    {
        /* on Win64, assert that it's correctly aligned so we're not going to be in trouble later */
        assert( (!is_win64 && !is_wow64) || view->base == view_base );
        WARN( "range %p - %p is already mapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base > view_base || range->end < view_end)
        ERR( "range %p - %p is already partially mapped\n", view_base, view_end );
    assert( range->base <= view_base && range->end >= view_end );

    /* need to split the range in two */
    if (range->base < view_base && range->end > view_end)
    {
        /* shift the tail of the sequence up one slot to make room for the upper half */
        memmove( next + 1, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        /* the sequence lives in a fixed-size block; overflowing it is fatal */
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            MESSAGE( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        next->base = view_end;
        next->end = range->end;
        range->end = view_base;
    }
    else
    {
        /* otherwise we just have to shrink it */
        if (range->base < view_base)
            range->end = view_base;
        else
            range->base = view_end;

        if (range->base < range->end) return;

        /* and possibly remove it if it's now empty */
        memmove( range, next, (free_ranges_end - next) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           free_ranges_remove_view
 *
 * Updates the free_ranges after a view has been destroyed.
 * The freed span is merged into a neighbouring free range when adjacent,
 * otherwise a new range entry is inserted.
 */
static void free_ranges_remove_view( struct file_view *view )
{
    void *view_base = ROUND_ADDR( view->base, granularity_mask );
    void *view_end = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
    struct range_entry *range = free_ranges_lower_bound( view_base );
    struct range_entry *next = range + 1;

    /* It's possible to use AT_ROUND_TO_PAGE on 32bit with NtMapViewOfSection to force 4kB alignment,
     * and this breaks our assumptions. Look at the views around to check if the range is still in use. */
#ifndef _WIN64
    struct file_view *prev_view = WINE_RB_ENTRY_VALUE( wine_rb_prev( &view->entry ), struct file_view, entry );
    struct file_view *next_view = WINE_RB_ENTRY_VALUE( wine_rb_next( &view->entry ), struct file_view, entry );
    void *prev_view_base = prev_view ? ROUND_ADDR( prev_view->base, granularity_mask ) : NULL;
    void *prev_view_end = prev_view ? ROUND_ADDR( (char *)prev_view->base + prev_view->size + granularity_mask, granularity_mask ) : NULL;
    void *next_view_base = next_view ? ROUND_ADDR( next_view->base, granularity_mask ) : NULL;
    void *next_view_end = next_view ? ROUND_ADDR( (char *)next_view->base + next_view->size + granularity_mask, granularity_mask ) : NULL;

    /* another view still overlaps the rounded-out span: keep it marked as used */
    if ((prev_view_base < view_end && prev_view_end > view_base) ||
        (next_view_base < view_end && next_view_end > view_base))
    {
        WARN( "range %p - %p is still mapped\n", view_base, view_end );
        return;
    }
#endif

    /* free_ranges initial value is such that the view is either inside range or before another one. */
    assert( range != free_ranges_end );
    assert( range->end > view_base || next != free_ranges_end );

    /* this should never happen, but we can safely ignore it */
    if (range->base <= view_base && range->end >= view_end)
    {
        WARN( "range %p - %p is already unmapped\n", view_base, view_end );
        return;
    }

    /* this should never happen */
    if (range->base < view_end && range->end > view_base)
        ERR( "range %p - %p is already partially unmapped\n", view_base, view_end );
    assert( range->end <= view_base || range->base >= view_end );

    /* merge with next if possible */
    if (range->end == view_base && next->base == view_end)
    {
        range->end = next->end;
        memmove( next, next + 1, (free_ranges_end - next - 1) * sizeof(struct range_entry) );
        free_ranges_end -= 1;
        assert( free_ranges_end - free_ranges > 0 );
    }
    /* or try growing the range */
    else if (range->end == view_base)
        range->end = view_end;
    else if (range->base == view_end)
        range->base = view_base;
    /* otherwise create a new one */
    else
    {
        memmove( range + 1, range, (free_ranges_end - range) * sizeof(struct range_entry) );
        free_ranges_end += 1;
        /* the sequence lives in a fixed-size block; overflowing it is fatal */
        if ((char *)free_ranges_end - (char *)free_ranges > view_block_size)
            MESSAGE( "Free range sequence is full, trouble ahead!\n" );
        assert( (char *)free_ranges_end - (char *)free_ranges <= view_block_size );

        range->base = view_base;
        range->end = view_end;
    }
}
|
|
|
|
|
|
|
|
|
2017-09-12 12:59:28 +02:00
|
|
|
static inline int is_view_valloc( const struct file_view *view )
|
|
|
|
{
|
|
|
|
return !(view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT));
|
|
|
|
}
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2017-09-05 13:55:36 +02:00
|
|
|
/***********************************************************************
 *           get_page_vprot
 *
 * Return the page protection byte.
 * Returns 0 (no protection recorded) for pages outside the table.
 */
static BYTE get_page_vprot( const void *addr )
{
    size_t idx = (size_t)addr >> page_shift;

#ifdef _WIN64
    /* two-level table: top index selects a directory page, which may be unallocated */
    if ((idx >> pages_vprot_shift) >= pages_vprot_size) return 0;
    if (!pages_vprot[idx >> pages_vprot_shift]) return 0;
    return pages_vprot[idx >> pages_vprot_shift][idx & pages_vprot_mask];
#else
    /* flat array: one byte per page */
    return pages_vprot[idx];
#endif
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           set_page_vprot
 *
 * Set a range of page protection bytes.
 * Assumes the protection table for the range has already been allocated
 * (see alloc_pages_vprot).
 */
static void set_page_vprot( const void *addr, size_t size, BYTE vprot )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    /* fill whole second-level pages first when the range spans directories */
    while (idx >> pages_vprot_shift != end >> pages_vprot_shift)
    {
        size_t dir_size = pages_vprot_mask + 1 - (idx & pages_vprot_mask);
        memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, dir_size );
        idx += dir_size;
    }
    /* then the tail inside the final directory */
    memset( pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask), vprot, end - idx );
#else
    memset( pages_vprot + idx, vprot, end - idx );
#endif
}
|
|
|
|
|
|
|
|
|
2017-09-05 14:18:06 +02:00
|
|
|
/***********************************************************************
 *           set_page_vprot_bits
 *
 * Set or clear bits in a range of page protection bytes.
 * For each page byte: cleared bits are removed first, then set bits applied.
 */
static void set_page_vprot_bits( const void *addr, size_t size, BYTE set, BYTE clear )
{
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;

#ifdef _WIN64
    for ( ; idx < end; idx++)
    {
        BYTE *ptr = pages_vprot[idx >> pages_vprot_shift] + (idx & pages_vprot_mask);
        *ptr = (*ptr & ~clear) | set;
    }
#else
    for ( ; idx < end; idx++) pages_vprot[idx] = (pages_vprot[idx] & ~clear) | set;
#endif
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           alloc_pages_vprot
 *
 * Allocate the page protection bytes for a given range.
 * On 64-bit, lazily allocates the second-level directory pages that cover
 * the range; returns FALSE on mmap failure. On 32-bit the flat array is
 * assumed to exist already, so this always succeeds.
 */
static BOOL alloc_pages_vprot( const void *addr, size_t size )
{
#ifdef _WIN64
    size_t idx = (size_t)addr >> page_shift;
    size_t end = ((size_t)addr + size + page_mask) >> page_shift;
    size_t i;
    void *ptr;

    assert( end <= pages_vprot_size << pages_vprot_shift );
    for (i = idx >> pages_vprot_shift; i < (end + pages_vprot_mask) >> pages_vprot_shift; i++)
    {
        if (pages_vprot[i]) continue;  /* directory already allocated */
        if ((ptr = wine_anon_mmap( NULL, pages_vprot_mask + 1, PROT_READ | PROT_WRITE, 0 )) == (void *)-1)
            return FALSE;
        pages_vprot[i] = ptr;
    }
#endif
    return TRUE;
}
|
|
|
|
|
|
|
|
|
2017-09-07 19:31:42 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* compare_view
|
|
|
|
*
|
|
|
|
* View comparison function used for the rb tree.
|
|
|
|
*/
|
|
|
|
static int compare_view( const void *addr, const struct wine_rb_entry *entry )
|
|
|
|
{
|
|
|
|
struct file_view *view = WINE_RB_ENTRY_VALUE( entry, struct file_view, entry );
|
|
|
|
|
|
|
|
if (addr < view->base) return -1;
|
|
|
|
if (addr > view->base) return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* VIRTUAL_GetProtStr
|
|
|
|
*/
|
|
|
|
static const char *VIRTUAL_GetProtStr( BYTE prot )
|
|
|
|
{
|
|
|
|
static char buffer[6];
|
|
|
|
buffer[0] = (prot & VPROT_COMMITTED) ? 'c' : '-';
|
2009-07-09 15:21:59 +02:00
|
|
|
buffer[1] = (prot & VPROT_GUARD) ? 'g' : ((prot & VPROT_WRITEWATCH) ? 'H' : '-');
|
2002-09-17 20:54:42 +02:00
|
|
|
buffer[2] = (prot & VPROT_READ) ? 'r' : '-';
|
2004-05-21 22:58:44 +02:00
|
|
|
buffer[3] = (prot & VPROT_WRITECOPY) ? 'W' : ((prot & VPROT_WRITE) ? 'w' : '-');
|
2002-09-17 20:54:42 +02:00
|
|
|
buffer[4] = (prot & VPROT_EXEC) ? 'x' : '-';
|
|
|
|
buffer[5] = 0;
|
|
|
|
return buffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-12-05 15:42:29 +01:00
|
|
|
/***********************************************************************
|
|
|
|
* VIRTUAL_GetUnixProt
|
|
|
|
*
|
|
|
|
* Convert page protections to protection for mmap/mprotect.
|
|
|
|
*/
|
|
|
|
static int VIRTUAL_GetUnixProt( BYTE vprot )
|
|
|
|
{
|
|
|
|
int prot = 0;
|
|
|
|
if ((vprot & VPROT_COMMITTED) && !(vprot & VPROT_GUARD))
|
|
|
|
{
|
|
|
|
if (vprot & VPROT_READ) prot |= PROT_READ;
|
2011-10-14 06:47:37 +02:00
|
|
|
if (vprot & VPROT_WRITE) prot |= PROT_WRITE | PROT_READ;
|
|
|
|
if (vprot & VPROT_WRITECOPY) prot |= PROT_WRITE | PROT_READ;
|
2011-10-14 05:16:00 +02:00
|
|
|
if (vprot & VPROT_EXEC) prot |= PROT_EXEC | PROT_READ;
|
2008-11-25 12:07:35 +01:00
|
|
|
if (vprot & VPROT_WRITEWATCH) prot &= ~PROT_WRITE;
|
2006-12-05 15:42:29 +01:00
|
|
|
}
|
|
|
|
if (!prot) prot = PROT_NONE;
|
|
|
|
return prot;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           VIRTUAL_DumpView
 *
 * Trace a view's address range, its kind (from the SEC_*/system flags),
 * and the per-page protections, coalescing runs of identical pages.
 */
static void VIRTUAL_DumpView( struct file_view *view )
{
    UINT i, count;
    char *addr = view->base;
    BYTE prot = get_page_vprot( addr );

    TRACE( "View: %p - %p", addr, addr + view->size - 1 );
    if (view->protect & VPROT_SYSTEM)
        TRACE( " (builtin image)\n" );
    else if (view->protect & SEC_IMAGE)
        TRACE( " (image)\n" );
    else if (view->protect & SEC_FILE)
        TRACE( " (file)\n" );
    else if (view->protect & (SEC_RESERVE | SEC_COMMIT))
        TRACE( " (anonymous)\n" );
    else
        TRACE( " (valloc)\n");

    /* run-length scan: flush a line each time the protection byte changes */
    for (count = i = 1; i < view->size >> page_shift; i++, count++)
    {
        BYTE next = get_page_vprot( addr + (count << page_shift) );
        if (next == prot) continue;
        TRACE( " %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, VIRTUAL_GetProtStr(prot) );
        addr += (count << page_shift);
        prot = next;
        count = 0;
    }
    /* flush the final run */
    if (count)
        TRACE( " %p - %p %s\n",
               addr, addr + (count << page_shift) - 1, VIRTUAL_GetProtStr(prot) );
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           VIRTUAL_Dump
 *
 * Debug helper: dump every view in the tree while holding csVirtual.
 * Only compiled in when WINE_VM_DEBUG is defined.
 */
#ifdef WINE_VM_DEBUG
static void VIRTUAL_Dump(void)
{
    sigset_t sigset;
    struct file_view *view;

    TRACE( "Dump of all virtual memory views:\n" );
    /* block signals too, since signal handlers may touch the view tree */
    server_enter_uninterrupted_section( &csVirtual, &sigset );
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        VIRTUAL_DumpView( view );
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
}
#endif
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           VIRTUAL_FindView
 *
 * Find the view containing a given address. The csVirtual section must be held by caller.
 * The whole [addr, addr+size) range must fit inside a single view.
 *
 * PARAMS
 *     addr [I] Address
 *     size [I] Size of the range that must be contained in the view
 *
 * RETURNS
 *     View: Success
 *     NULL: Failure (no view, range too large, or address overflow)
 */
static struct file_view *VIRTUAL_FindView( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    if ((const char *)addr + size < (const char *)addr) return NULL; /* overflow */

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if (view->base > addr) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else if ((const char *)view->base + view->size < (const char *)addr + size) break; /* size too large */
        else return view;
    }
    return NULL;
}
|
|
|
|
|
|
|
|
|
2019-08-01 10:07:44 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* zero_bits_win_to_64
|
|
|
|
*
|
|
|
|
* Convert from Windows hybrid 32bit-based / bitmask to 64bit-based format
|
|
|
|
*/
|
|
|
|
static inline unsigned short zero_bits_win_to_64( ULONG_PTR zero_bits )
|
|
|
|
{
|
|
|
|
unsigned short zero_bits_64;
|
|
|
|
|
|
|
|
if (zero_bits == 0) return 0;
|
|
|
|
if (zero_bits < 32) return 32 + zero_bits;
|
|
|
|
zero_bits_64 = 63;
|
|
|
|
#ifdef _WIN64
|
|
|
|
if (zero_bits >> 32) { zero_bits_64 -= 32; zero_bits >>= 32; }
|
|
|
|
#endif
|
|
|
|
if (zero_bits >> 16) { zero_bits_64 -= 16; zero_bits >>= 16; }
|
|
|
|
if (zero_bits >> 8) { zero_bits_64 -= 8; zero_bits >>= 8; }
|
|
|
|
if (zero_bits >> 4) { zero_bits_64 -= 4; zero_bits >>= 4; }
|
|
|
|
if (zero_bits >> 2) { zero_bits_64 -= 2; zero_bits >>= 2; }
|
|
|
|
if (zero_bits >> 1) { zero_bits_64 -= 1; }
|
|
|
|
return zero_bits_64;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2019-08-01 10:07:45 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* get_zero_bits_64_mask
|
|
|
|
*/
|
|
|
|
static inline UINT_PTR get_zero_bits_64_mask( USHORT zero_bits_64 )
|
|
|
|
{
|
|
|
|
return (UINT_PTR)((~(UINT64)0) >> zero_bits_64);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-09-22 12:15:27 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* is_write_watch_range
|
|
|
|
*/
|
|
|
|
static inline BOOL is_write_watch_range( const void *addr, size_t size )
|
|
|
|
{
|
|
|
|
struct file_view *view = VIRTUAL_FindView( addr, size );
|
|
|
|
return view && (view->protect & VPROT_WRITEWATCH);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-01-16 05:52:17 +01:00
|
|
|
/***********************************************************************
 *           find_view_range
 *
 * Find the first view overlapping at least part of the specified range.
 * The csVirtual section must be held by caller.
 * Note: returns some overlapping view found during the tree descent;
 * see VIRTUAL_FindView for full-containment lookup.
 */
static struct file_view *find_view_range( const void *addr, size_t size )
{
    struct wine_rb_entry *ptr = views_tree.root;

    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );

        if ((const char *)view->base >= (const char *)addr + size) ptr = ptr->left;
        else if ((const char *)view->base + view->size <= (const char *)addr) ptr = ptr->right;
        else return view;
    }
    return NULL;
}
|
|
|
|
|
2019-12-26 22:27:26 +01:00
|
|
|
|
2006-07-24 14:00:19 +02:00
|
|
|
/***********************************************************************
 *           find_view_inside_range
 *
 * Find first (resp. last, if top_down) view inside a range.
 * On return, *base_ptr and *end_ptr are narrowed past any views that
 * touch the edges of the original range.
 * The csVirtual section must be held by caller.
 */
static struct wine_rb_entry *find_view_inside_range( void **base_ptr, void **end_ptr, int top_down )
{
    struct wine_rb_entry *first = NULL, *ptr = views_tree.root;
    void *base = *base_ptr, *end = *end_ptr;

    /* find the first (resp. last) view inside the range */
    while (ptr)
    {
        struct file_view *view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base + view->size >= (char *)end)
        {
            /* view reaches or passes the range end: clip the end and search lower addresses */
            end = min( end, view->base );
            ptr = ptr->left;
        }
        else if (view->base <= base)
        {
            /* view starts at or below the range base: clip the base and search higher addresses */
            base = max( (char *)base, (char *)view->base + view->size );
            ptr = ptr->right;
        }
        else
        {
            /* view lies strictly inside the range: remember it and keep descending
             * towards the last (top_down) resp. first (bottom-up) such view */
            first = ptr;
            ptr = top_down ? ptr->right : ptr->left;
        }
    }

    *base_ptr = base;
    *end_ptr = end;
    return first;
}
|
|
|
|
|
|
|
|
|
2019-12-26 22:27:27 +01:00
|
|
|
/***********************************************************************
 *           try_map_free_area
 *
 * Try mmaping some expected free memory region, eventually stepping and
 * retrying inside it, and return where it actually succeeded, or NULL.
 */
static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
                                void *start, size_t size, int unix_prot )
{
    void *ptr;

    /* probe candidate addresses inside [base, end), advancing by 'step'
     * bytes, until the mapping lands exactly at the requested address */
    while (start && base <= start && (char*)start + size <= (char*)end)
    {
        if ((ptr = wine_anon_mmap( start, size, unix_prot, 0 )) == start)
            return start;
        TRACE( "Found free area is already mapped, start %p.\n", start );

        /* the mapping was placed somewhere else: undo it before retrying */
        if (ptr != (void *)-1)
            munmap( ptr, size );

        /* stop if the next step would leave the window; the comparisons are
         * written as differences to avoid overflowing pointer arithmetic */
        if ((step > 0 && (char *)end - (char *)start < step) ||
            (step < 0 && (char *)start - (char *)base < -step) ||
            step == 0)
            break;
        start = (char *)start + step;
    }

    return NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           map_free_area
 *
 * Find a free area between views inside the specified range and map it.
 * The csVirtual section must be held by caller.
 */
static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot )
{
    struct wine_rb_entry *first = find_view_inside_range( &base, &end, top_down );
    /* step by one allocation granularity unit, downwards when top_down */
    ptrdiff_t step = top_down ? -(granularity_mask + 1) : (granularity_mask + 1);
    void *start;

    if (top_down)
    {
        /* highest granularity-aligned candidate that still fits below 'end' */
        start = ROUND_ADDR( (char *)end - size, granularity_mask );
        if (start >= end || start < base) return NULL;

        /* walk the views downwards, trying the gap above each one */
        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( (char *)view->base + view->size, (char *)start + size, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base - size, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || start < base) return NULL;
            first = wine_rb_prev( first );
        }
    }
    else
    {
        /* lowest granularity-aligned candidate at or above 'base' */
        start = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
        if (!start || start >= end || (char *)end - (char *)start < size) return NULL;

        /* walk the views upwards, trying the gap below each one */
        while (first)
        {
            struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
            if ((start = try_map_free_area( start, view->base, step,
                                            start, size, unix_prot ))) break;
            start = ROUND_ADDR( (char *)view->base + view->size + granularity_mask, granularity_mask );
            /* stop if remaining space is not large enough */
            if (!start || start >= end || (char *)end - (char *)start < size) return NULL;
            first = wine_rb_next( first );
        }
    }

    /* no view inside the range at all: try the whole window in one go */
    if (!first)
        return try_map_free_area( base, end, step, start, size, unix_prot );

    return start;
}
|
|
|
|
|
|
|
|
|
2019-12-26 22:27:26 +01:00
|
|
|
/***********************************************************************
 *           find_reserved_free_area
 *
 * Find a free area between views inside the specified range.
 * The csVirtual section must be held by caller.
 * The range must be inside the preloader reserved range.
 */
static void *find_reserved_free_area( void *base, void *end, size_t size, int top_down )
{
    struct range_entry *range;
    void *start;

    /* round base up and end down so any result is granularity-aligned */
    base = ROUND_ADDR( (char *)base + granularity_mask, granularity_mask );
    end = (char *)ROUND_ADDR( (char *)end - size, granularity_mask ) + size;

    if (top_down)
    {
        start = (char *)end - size;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        /* if the found range cannot hold 'size' above start, retry from its top */
        if ((char *)range->end - (char *)start < size) start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        do
        {
            /* candidate left the window or no longer fits */
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            /* candidate falls in a free range large enough: done */
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            /* move to the next lower free range */
            if (--range < free_ranges) return NULL;
            start = ROUND_ADDR( (char *)range->end - size, granularity_mask );
        }
        while (1);
    }
    else
    {
        start = base;
        range = free_ranges_lower_bound( start );
        assert(range != free_ranges_end && range->end >= start);

        /* if start is below the free range, align up into it */
        if (start < range->base) start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        do
        {
            /* candidate left the window or no longer fits */
            if (start >= end || start < base || (char *)end - (char *)start < size) return NULL;
            /* candidate falls in a free range large enough: done */
            if (start < range->end && start >= range->base && (char *)range->end - (char *)start >= size) break;
            /* move to the next higher free range */
            if (++range == free_ranges_end) return NULL;
            start = ROUND_ADDR( (char *)range->base + granularity_mask, granularity_mask );
        }
        while (1);
    }
    return start;
}
|
|
|
|
|
|
|
|
|
2004-05-25 03:29:24 +02:00
|
|
|
/***********************************************************************
 *           add_reserved_area
 *
 * Add a reserved area to the list maintained by libwine.
 * The csVirtual section must be held by caller.
 */
static void add_reserved_area( void *addr, size_t size )
{
    TRACE( "adding %p-%p\n", addr, (char *)addr + size );

    if (addr < user_space_limit)
    {
        /* unmap the part of the area that is below the limit */
        assert( (char *)addr + size > (char *)user_space_limit );
        munmap( addr, (char *)user_space_limit - (char *)addr );
        size -= (char *)user_space_limit - (char *)addr;
        addr = user_space_limit;
    }
    /* blow away existing mappings */
    wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE | MAP_FIXED );
    unix_funcs->mmap_add_reserved_area( addr, size );
}
|
|
|
|
|
|
|
|
|
2009-06-25 14:18:53 +02:00
|
|
|
/***********************************************************************
 *           remove_reserved_area
 *
 * Remove a reserved area from the list maintained by libwine.
 * The csVirtual section must be held by caller.
 */
static void remove_reserved_area( void *addr, size_t size )
{
    struct file_view *view;

    TRACE( "removing %p-%p\n", addr, (char *)addr + size );
    unix_funcs->mmap_remove_reserved_area( addr, size );

    /* unmap areas not covered by an existing view */
    WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
    {
        /* views are visited in address order; stop past the end of the area */
        if ((char *)view->base >= (char *)addr + size) break;
        if ((char *)view->base + view->size <= (char *)addr) continue;
        /* unmap the gap between the current position and this view */
        if (view->base > addr) munmap( addr, (char *)view->base - (char *)addr );
        /* view extends past the area: nothing left to unmap */
        if ((char *)view->base + view->size > (char *)addr + size) return;
        /* advance past the view and continue with the remainder */
        size = (char *)addr + size - ((char *)view->base + view->size);
        addr = (char *)view->base + view->size;
    }
    munmap( addr, size );
}
|
|
|
|
|
|
|
|
|
2017-07-10 15:21:32 +02:00
|
|
|
/* argument block for get_area_boundary_callback */
struct area_boundary
{
    void *base;      /* [in] start of the region being examined */
    size_t size;     /* [in] size of the region being examined */
    void *boundary;  /* [out] lowest reserved/non-reserved boundary found, or NULL */
};
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* get_area_boundary_callback
|
|
|
|
*
|
|
|
|
* Get lowest boundary address between reserved area and non-reserved area
|
|
|
|
* in the specified region. If no boundaries are found, result is NULL.
|
|
|
|
* The csVirtual section must be held by caller.
|
|
|
|
*/
|
2020-05-17 11:04:47 +02:00
|
|
|
static int CDECL get_area_boundary_callback( void *start, SIZE_T size, void *arg )
|
2017-07-10 15:21:32 +02:00
|
|
|
{
|
|
|
|
struct area_boundary *area = arg;
|
|
|
|
void *end = (char *)start + size;
|
|
|
|
|
|
|
|
area->boundary = NULL;
|
|
|
|
if (area->base >= end) return 0;
|
|
|
|
if ((char *)start >= (char *)area->base + area->size) return 1;
|
|
|
|
if (area->base >= start)
|
|
|
|
{
|
|
|
|
if ((char *)area->base + area->size > (char *)end)
|
|
|
|
{
|
|
|
|
area->boundary = end;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
area->boundary = start;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-05-25 03:29:24 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* is_beyond_limit
|
|
|
|
*
|
|
|
|
* Check if an address range goes beyond a given limit.
|
|
|
|
*/
|
2013-11-02 14:34:13 +01:00
|
|
|
static inline BOOL is_beyond_limit( const void *addr, size_t size, const void *limit )
|
2004-05-25 03:29:24 +02:00
|
|
|
{
|
2008-11-07 11:04:07 +01:00
|
|
|
return (addr >= limit || (const char *)addr + size > (const char *)limit);
|
2004-05-25 03:29:24 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           unmap_area
 *
 * Unmap an area, or simply replace it by an empty mapping if it is
 * in a reserved area. The csVirtual section must be held by caller.
 */
static inline void unmap_area( void *addr, size_t size )
{
    switch (unix_funcs->mmap_is_in_reserved_area( addr, size ))
    {
    case -1: /* partially in a reserved area */
    {
        struct area_boundary area;
        size_t lower_size;
        area.base = addr;
        area.size = size;
        /* find where the reserved part ends so the two halves can be
         * handled independently (recursing once per half) */
        unix_funcs->mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)addr;
        unmap_area( addr, lower_size );
        unmap_area( area.boundary, size - lower_size );
        break;
    }
    case 1: /* in a reserved area */
        /* keep the pages reserved by replacing them with an empty mapping */
        wine_anon_mmap( addr, size, PROT_NONE, MAP_NORESERVE | MAP_FIXED );
        break;
    default:
    case 0: /* not in a reserved area */
        if (is_beyond_limit( addr, size, user_space_limit ))
            add_reserved_area( addr, size );
        else
            munmap( addr, size );
        break;
    }
}
|
|
|
|
|
|
|
|
|
2017-09-06 09:58:18 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* alloc_view
|
|
|
|
*
|
|
|
|
* Allocate a new view. The csVirtual section must be held by caller.
|
|
|
|
*/
|
|
|
|
static struct file_view *alloc_view(void)
|
|
|
|
{
|
|
|
|
if (next_free_view)
|
|
|
|
{
|
|
|
|
struct file_view *ret = next_free_view;
|
|
|
|
next_free_view = *(struct file_view **)ret;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
if (view_block_start == view_block_end)
|
|
|
|
{
|
|
|
|
void *ptr = wine_anon_mmap( NULL, view_block_size, PROT_READ | PROT_WRITE, 0 );
|
|
|
|
if (ptr == (void *)-1) return NULL;
|
|
|
|
view_block_start = ptr;
|
|
|
|
view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
|
|
|
|
}
|
|
|
|
return view_block_start++;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2004-05-25 03:29:24 +02:00
|
|
|
/***********************************************************************
 *           delete_view
 *
 * Deletes a view. The csVirtual section must be held by caller.
 */
static void delete_view( struct file_view *view ) /* [in] View */
{
    /* system views are owned elsewhere, so their pages are not unmapped */
    if (!(view->protect & VPROT_SYSTEM)) unmap_area( view->base, view->size );
    set_page_vprot( view->base, view->size, 0 );
    /* free-range tracking only covers the preloader reserved areas */
    if (unix_funcs->mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_remove_view( view );
    wine_rb_remove( &views_tree, &view->entry );
    /* push the view onto the free list, reusing its storage for the link */
    *(struct file_view **)view = next_free_view;
    next_free_view = view;
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           create_view
 *
 * Create a view. The csVirtual section must be held by caller.
 * base and size must be page-aligned.
 */
static NTSTATUS create_view( struct file_view **view_ret, void *base, size_t size, unsigned int vprot )
{
    struct file_view *view;
    int unix_prot = VIRTUAL_GetUnixProt( vprot );

    assert( !((UINT_PTR)base & page_mask) );
    assert( !(size & page_mask) );

    /* Check for overlapping views. This can happen if the previous view
     * was a system view that got unmapped behind our back. In that case
     * we recover by simply deleting it. */

    while ((view = find_view_range( base, size )))
    {
        TRACE( "overlapping view %p-%p for %p-%p\n",
               view->base, (char *)view->base + view->size, base, (char *)base + size );
        assert( view->protect & VPROT_SYSTEM );
        delete_view( view );
    }

    if (!alloc_pages_vprot( base, size )) return STATUS_NO_MEMORY;

    /* Create the view structure */

    if (!(view = alloc_view()))
    {
        FIXME( "out of memory for %p-%p\n", base, (char *)base + size );
        return STATUS_NO_MEMORY;
    }

    view->base = base;
    view->size = size;
    view->protect = vprot;
    set_page_vprot( base, size, vprot );

    wine_rb_put( &views_tree, view->base, &view->entry );
    /* free-range tracking only covers the preloader reserved areas */
    if (unix_funcs->mmap_is_in_reserved_area( view->base, view->size ))
        free_ranges_insert_view( view );

    *view_ret = view;

    /* when exec permission is forced globally, make readable pages executable too */
    if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
    {
        TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
        mprotect( base, size, unix_prot | PROT_EXEC );
    }
    return STATUS_SUCCESS;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* VIRTUAL_GetWin32Prot
|
|
|
|
*
|
|
|
|
* Convert page protections to Win32 flags.
|
|
|
|
*/
|
2017-09-08 15:37:12 +02:00
|
|
|
static DWORD VIRTUAL_GetWin32Prot( BYTE vprot, unsigned int map_prot )
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2005-12-14 12:17:59 +01:00
|
|
|
DWORD ret = VIRTUAL_Win32Flags[vprot & 0x0f];
|
|
|
|
if (vprot & VPROT_GUARD) ret |= PAGE_GUARD;
|
2017-09-08 15:37:12 +02:00
|
|
|
if (map_prot & SEC_NOCACHE) ret |= PAGE_NOCACHE;
|
2005-12-14 12:17:59 +01:00
|
|
|
return ret;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
2008-12-11 14:05:42 +01:00
|
|
|
* get_vprot_flags
|
2002-09-17 20:54:42 +02:00
|
|
|
*
|
|
|
|
* Build page protections from Win32 flags.
|
|
|
|
*
|
2005-05-18 11:45:12 +02:00
|
|
|
* PARAMS
|
|
|
|
* protect [I] Win32 protection flags
|
|
|
|
*
|
2002-09-17 20:54:42 +02:00
|
|
|
* RETURNS
|
|
|
|
* Value of page protection flags
|
|
|
|
*/
|
2011-12-16 07:45:23 +01:00
|
|
|
static NTSTATUS get_vprot_flags( DWORD protect, unsigned int *vprot, BOOL image )
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
|
|
|
switch(protect & 0xff)
|
|
|
|
{
|
|
|
|
case PAGE_READONLY:
|
2008-12-11 14:05:42 +01:00
|
|
|
*vprot = VPROT_READ;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_READWRITE:
|
2011-12-16 07:45:23 +01:00
|
|
|
if (image)
|
|
|
|
*vprot = VPROT_READ | VPROT_WRITECOPY;
|
|
|
|
else
|
|
|
|
*vprot = VPROT_READ | VPROT_WRITE;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_WRITECOPY:
|
2008-12-11 14:05:42 +01:00
|
|
|
*vprot = VPROT_READ | VPROT_WRITECOPY;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_EXECUTE:
|
2008-12-11 14:05:42 +01:00
|
|
|
*vprot = VPROT_EXEC;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_EXECUTE_READ:
|
2008-12-11 14:05:42 +01:00
|
|
|
*vprot = VPROT_EXEC | VPROT_READ;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_EXECUTE_READWRITE:
|
2011-12-16 07:45:23 +01:00
|
|
|
if (image)
|
|
|
|
*vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
|
|
|
|
else
|
|
|
|
*vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITE;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_EXECUTE_WRITECOPY:
|
2008-12-11 14:05:42 +01:00
|
|
|
*vprot = VPROT_EXEC | VPROT_READ | VPROT_WRITECOPY;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
|
|
|
case PAGE_NOACCESS:
|
2008-12-11 14:05:42 +01:00
|
|
|
*vprot = 0;
|
2002-09-17 20:54:42 +02:00
|
|
|
break;
|
2008-12-11 14:05:42 +01:00
|
|
|
default:
|
2011-09-28 07:47:06 +02:00
|
|
|
return STATUS_INVALID_PAGE_PROTECTION;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
2008-12-11 14:05:42 +01:00
|
|
|
if (protect & PAGE_GUARD) *vprot |= VPROT_GUARD;
|
|
|
|
return STATUS_SUCCESS;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-10-08 21:26:28 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* mprotect_exec
|
|
|
|
*
|
|
|
|
* Wrapper for mprotect, adds PROT_EXEC if forced by force_exec_prot
|
|
|
|
*/
|
2017-09-22 09:52:29 +02:00
|
|
|
static inline int mprotect_exec( void *base, size_t size, int unix_prot )
|
2014-10-08 21:26:28 +02:00
|
|
|
{
|
2017-09-08 12:46:47 +02:00
|
|
|
if (force_exec_prot && (unix_prot & PROT_READ) && !(unix_prot & PROT_EXEC))
|
2014-10-08 21:26:28 +02:00
|
|
|
{
|
|
|
|
TRACE( "forcing exec permission on %p-%p\n", base, (char *)base + size - 1 );
|
|
|
|
if (!mprotect( base, size, unix_prot | PROT_EXEC )) return 0;
|
|
|
|
/* exec + write may legitimately fail, in that case fall back to write only */
|
|
|
|
if (!(unix_prot & PROT_WRITE)) return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return mprotect( base, size, unix_prot );
|
|
|
|
}
|
|
|
|
|
2017-09-05 14:28:27 +02:00
|
|
|
|
|
|
|
/***********************************************************************
 *           mprotect_range
 *
 * Call mprotect on a page range, applying the protections from the per-page byte.
 * 'set' and 'clear' adjust the stored vprot bits before conversion.
 * Consecutive pages with identical Unix protections are merged into a
 * single mprotect call.
 */
static void mprotect_range( void *base, size_t size, BYTE set, BYTE clear )
{
    size_t i, count;
    char *addr = ROUND_ADDR( base, page_mask );
    int prot, next;

    size = ROUND_SIZE( base, size );
    prot = VIRTUAL_GetUnixProt( (get_page_vprot( addr ) & ~clear ) | set );
    for (count = i = 1; i < size >> page_shift; i++, count++)
    {
        next = VIRTUAL_GetUnixProt( (get_page_vprot( addr + (count << page_shift) ) & ~clear) | set );
        if (next == prot) continue;
        /* protection changes here: flush the accumulated run of pages */
        mprotect_exec( addr, count << page_shift, prot );
        addr += count << page_shift;
        prot = next;
        count = 0;
    }
    /* flush the final run */
    if (count) mprotect_exec( addr, count << page_shift, prot );
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           VIRTUAL_SetProt
 *
 * Change the protection of a range of pages.
 *
 * RETURNS
 *	TRUE: Success
 *	FALSE: Failure
 */
static BOOL VIRTUAL_SetProt( struct file_view *view, /* [in] Pointer to view */
                             void *base,      /* [in] Starting address */
                             size_t size,     /* [in] Size in bytes */
                             BYTE vprot )     /* [in] Protections to use */
{
    int unix_prot = VIRTUAL_GetUnixProt(vprot);

    if (view->protect & VPROT_WRITEWATCH)
    {
        /* each page may need different protections depending on write watch flag */
        set_page_vprot_bits( base, size, vprot & ~VPROT_WRITEWATCH, ~vprot & ~VPROT_WRITEWATCH );
        mprotect_range( base, size, 0, 0 );
        return TRUE;
    }

    /* if setting stack guard pages, store the permissions first, as the guard may be
     * triggered at any point after mprotect and change the permissions again */
    if ((vprot & VPROT_GUARD) &&
        (base >= NtCurrentTeb()->DeallocationStack) &&
        (base < NtCurrentTeb()->Tib.StackBase))
    {
        set_page_vprot( base, size, vprot );
        mprotect( base, size, unix_prot );
        return TRUE;
    }

    if (mprotect_exec( base, size, unix_prot )) /* FIXME: last error */
        return FALSE;

    /* only record the new protections once mprotect succeeded */
    set_page_vprot( base, size, vprot );
    return TRUE;
}
|
|
|
|
|
|
|
|
|
2017-09-12 10:57:07 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* set_protection
|
|
|
|
*
|
|
|
|
* Set page protections on a range of pages
|
|
|
|
*/
|
|
|
|
static NTSTATUS set_protection( struct file_view *view, void *base, SIZE_T size, ULONG protect )
|
|
|
|
{
|
|
|
|
unsigned int vprot;
|
|
|
|
NTSTATUS status;
|
|
|
|
|
|
|
|
if ((status = get_vprot_flags( protect, &vprot, view->protect & SEC_IMAGE ))) return status;
|
2017-09-12 12:59:28 +02:00
|
|
|
if (is_view_valloc( view ))
|
2017-09-12 10:57:07 +02:00
|
|
|
{
|
|
|
|
if (vprot & VPROT_WRITECOPY) return STATUS_INVALID_PAGE_PROTECTION;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
BYTE access = vprot & (VPROT_READ | VPROT_WRITE | VPROT_EXEC);
|
|
|
|
if ((view->protect & access) != access) return STATUS_INVALID_PAGE_PROTECTION;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!VIRTUAL_SetProt( view, base, size, vprot | VPROT_COMMITTED )) return STATUS_ACCESS_DENIED;
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2017-09-22 14:56:56 +02:00
|
|
|
/***********************************************************************
 *           update_write_watches
 *
 * Clear the write-watch flag on the pages that were actually written to
 * and re-apply page protections over the whole watched range.
 */
static void update_write_watches( void *base, size_t size, size_t accessed_size )
{
    TRACE( "updating watch %p-%p-%p\n", base, (char *)base + accessed_size, (char *)base + size );
    /* clear write watch flag on accessed pages */
    set_page_vprot_bits( base, accessed_size, 0, VPROT_WRITEWATCH );
    /* restore page protections on the entire range */
    mprotect_range( base, size, 0, 0 );
}
|
|
|
|
|
|
|
|
|
2008-11-25 12:07:35 +01:00
|
|
|
/***********************************************************************
 *           reset_write_watches
 *
 * Reset write watches in a memory range.
 * Re-arms the watch on every page and re-applies protections so the
 * next write faults again.
 */
static void reset_write_watches( void *base, SIZE_T size )
{
    set_page_vprot_bits( base, size, VPROT_WRITEWATCH, 0 );
    mprotect_range( base, size, 0, 0 );
}
|
|
|
|
|
|
|
|
|
2005-09-14 12:36:58 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* unmap_extra_space
|
|
|
|
*
|
|
|
|
* Release the extra memory while keeping the range starting on the granularity boundary.
|
|
|
|
*/
|
2020-04-30 10:07:36 +02:00
|
|
|
static inline void *unmap_extra_space( void *ptr, size_t total_size, size_t wanted_size )
|
2005-09-14 12:36:58 +02:00
|
|
|
{
|
2020-04-30 10:07:36 +02:00
|
|
|
if ((ULONG_PTR)ptr & granularity_mask)
|
2005-09-14 12:36:58 +02:00
|
|
|
{
|
2020-04-30 10:07:36 +02:00
|
|
|
size_t extra = granularity_mask + 1 - ((ULONG_PTR)ptr & granularity_mask);
|
2005-09-14 12:36:58 +02:00
|
|
|
munmap( ptr, extra );
|
|
|
|
ptr = (char *)ptr + extra;
|
|
|
|
total_size -= extra;
|
|
|
|
}
|
|
|
|
if (total_size > wanted_size)
|
|
|
|
munmap( (char *)ptr + wanted_size, total_size - wanted_size );
|
|
|
|
return ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2006-07-24 14:00:19 +02:00
|
|
|
/* argument block for alloc_reserved_area_callback */
struct alloc_area
{
    size_t size;   /* [in] size of the block to allocate */
    int top_down;  /* [in] search from the top of the range downwards */
    void *limit;   /* [in] highest acceptable address */
    void *result;  /* [out] address found, if any */
};
|
|
|
|
|
|
|
|
/***********************************************************************
 *           alloc_reserved_area_callback
 *
 * Try to map some space inside a reserved area. Callback for mmap_enum_reserved_areas.
 *
 * Called once per reserved area; 'arg' is a struct alloc_area describing
 * the request.  Returns 1 (stop enumeration) with alloc->result set when
 * a suitable free range was found, 0 to continue with the next area.
 */
static int CDECL alloc_reserved_area_callback( void *start, SIZE_T size, void *arg )
{
    struct alloc_area *alloc = arg;
    void *end = (char *)start + size;

    /* clip the candidate range to [address_space_start, alloc->limit) */
    if (start < address_space_start) start = address_space_start;
    if (is_beyond_limit( start, size, alloc->limit )) end = alloc->limit;
    if (start >= end) return 0;

    /* make sure we don't touch the preloader reserved range */
    if (preload_reserve_end >= start)
    {
        if (preload_reserve_end >= end)
        {
            if (preload_reserve_start <= start) return 0;  /* no space in that area */
            if (preload_reserve_start < end) end = preload_reserve_start;
        }
        else if (preload_reserve_start <= start) start = preload_reserve_end;
        else
        {
            /* range is split in two by the preloader reservation, try first part */
            if ((alloc->result = find_reserved_free_area( start, preload_reserve_start, alloc->size,
                                                          alloc->top_down )))
                return 1;
            /* then fall through to try second part */
            start = preload_reserve_end;
        }
    }
    /* search the (possibly clipped) range for a free block of the wanted size */
    if ((alloc->result = find_reserved_free_area( start, end, alloc->size, alloc->top_down )))
        return 1;

    return 0;
}
|
|
|
|
|
2017-07-10 15:21:32 +02:00
|
|
|
/***********************************************************************
 *           map_fixed_area
 *
 * mmap the fixed memory area.
 * The csVirtual section must be held by caller.
 *
 * Maps [base, base+size) at exactly that address, handling the three
 * possible relationships with the reserved-area list: fully outside,
 * fully inside, or straddling a boundary (in which case the range is
 * split recursively at the boundary).
 */
static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
{
    void *ptr;

    switch (unix_funcs->mmap_is_in_reserved_area( base, size ))
    {
    case -1: /* partially in a reserved area */
    {
        NTSTATUS status;
        struct area_boundary area;
        size_t lower_size;
        area.base = base;
        area.size = size;
        /* locate the boundary between the reserved and non-reserved parts */
        unix_funcs->mmap_enum_reserved_areas( get_area_boundary_callback, &area, 0 );
        assert( area.boundary );
        lower_size = (char *)area.boundary - (char *)base;
        /* map the two halves separately; undo the first on failure of the second */
        status = map_fixed_area( base, lower_size, vprot );
        if (status == STATUS_SUCCESS)
        {
            status = map_fixed_area( area.boundary, size - lower_size, vprot);
            if (status != STATUS_SUCCESS) unmap_area( base, lower_size );
        }
        return status;
    }
    case 0:  /* not in a reserved area, do a normal allocation */
        if ((ptr = wine_anon_mmap( base, size, VIRTUAL_GetUnixProt(vprot), 0 )) == (void *)-1)
        {
            if (errno == ENOMEM) return STATUS_NO_MEMORY;
            return STATUS_INVALID_PARAMETER;
        }
        if (ptr != base)
        {
            /* We couldn't get the address we wanted */
            if (is_beyond_limit( ptr, size, user_space_limit )) add_reserved_area( ptr, size );
            else munmap( ptr, size );
            return STATUS_CONFLICTING_ADDRESSES;
        }
        break;

    default:
    case 1:  /* in a reserved area, make sure the address is available */
        if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
        /* replace the reserved area by our mapping */
        if ((ptr = wine_anon_mmap( base, size, VIRTUAL_GetUnixProt(vprot), MAP_FIXED )) != base)
            return STATUS_INVALID_PARAMETER;
        break;
    }
    /* growing past the current working-set limit expands it to the full address space */
    if (is_beyond_limit( ptr, size, working_set_limit )) working_set_limit = address_space_limit;
    return STATUS_SUCCESS;
}
|
2006-07-24 14:00:19 +02:00
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           map_view
 *
 * Create a view and mmap the corresponding memory area.
 * The csVirtual section must be held by caller.
 *
 * If 'base' is non-NULL the view is mapped at exactly that address
 * (via map_fixed_area); otherwise an address is chosen, trying in order:
 * a reserved area, map_free_area (when zero_bits_64 constrains the
 * address), and finally a plain anonymous mmap with retry.
 */
static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
                          int top_down, unsigned int vprot, unsigned short zero_bits_64 )
{
    void *ptr;
    NTSTATUS status;

    if (base)
    {
        if (is_beyond_limit( base, size, address_space_limit ))
            return STATUS_WORKING_SET_LIMIT_RANGE;
        status = map_fixed_area( base, size, vprot );
        if (status != STATUS_SUCCESS) return status;
        ptr = base;
    }
    else
    {
        /* over-allocate so the result can be aligned to the allocation granularity */
        size_t view_size = size + granularity_mask + 1;
        struct alloc_area alloc;

        alloc.size = size;
        alloc.top_down = top_down;
        alloc.limit = (void*)(get_zero_bits_64_mask( zero_bits_64 ) & (UINT_PTR)user_space_limit);

        /* first preference: carve the block out of a reserved area */
        if (unix_funcs->mmap_enum_reserved_areas( alloc_reserved_area_callback, &alloc, top_down ))
        {
            ptr = alloc.result;
            TRACE( "got mem in reserved area %p-%p\n", ptr, (char *)ptr + size );
            if (wine_anon_mmap( ptr, size, VIRTUAL_GetUnixProt(vprot), MAP_FIXED ) != ptr)
                return STATUS_INVALID_PARAMETER;
            goto done;
        }

        /* address constrained by zero_bits: search the view list explicitly */
        if (zero_bits_64)
        {
            if (!(ptr = map_free_area( address_space_start, alloc.limit, size,
                                       top_down, VIRTUAL_GetUnixProt(vprot) )))
                return STATUS_NO_MEMORY;
            TRACE( "got mem with map_free_area %p-%p\n", ptr, (char *)ptr + size );
            goto done;
        }

        /* unconstrained: let the kernel pick an address, retrying while it
         * hands back memory beyond the user-space limit */
        for (;;)
        {
            if ((ptr = wine_anon_mmap( NULL, view_size, VIRTUAL_GetUnixProt(vprot), 0 )) == (void *)-1)
            {
                if (errno == ENOMEM) return STATUS_NO_MEMORY;
                return STATUS_INVALID_PARAMETER;
            }
            TRACE( "got mem with anon mmap %p-%p\n", ptr, (char *)ptr + size );
            /* if we got something beyond the user limit, unmap it and retry */
            if (is_beyond_limit( ptr, view_size, user_space_limit )) add_reserved_area( ptr, view_size );
            else break;
        }
        /* trim the over-allocation down to an aligned 'size' bytes */
        ptr = unmap_extra_space( ptr, view_size, size );
    }
done:
    status = create_view( view_ret, ptr, size, vprot );
    if (status != STATUS_SUCCESS) unmap_area( ptr, size );
    return status;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           map_file_into_view
 *
 * Wrapper for mmap() to map a file into a view, falling back to read if mmap fails.
 * The csVirtual section must be held by caller.
 *
 * The fallback path (anonymous mmap + pread) is only taken for private
 * (copy-on-write) mappings; shared mappings that cannot be mmap'ed fail
 * with an appropriate status instead, since a read copy would not stay
 * coherent with the file.
 */
static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start, size_t size,
                                    off_t offset, unsigned int vprot, BOOL removable )
{
    void *ptr;
    int prot = VIRTUAL_GetUnixProt( vprot | VPROT_COMMITTED /* make sure it is accessible */ );
    unsigned int flags = MAP_FIXED | ((vprot & VPROT_WRITECOPY) ? MAP_PRIVATE : MAP_SHARED);

    assert( start < view->size );
    assert( start + size <= view->size );

    if (force_exec_prot && (vprot & VPROT_READ))
    {
        TRACE( "forcing exec permission on mapping %p-%p\n",
               (char *)view->base + start, (char *)view->base + start + size - 1 );
        prot |= PROT_EXEC;
    }

    /* only try mmap if media is not removable (or if we require write access) */
    if (!removable || (flags & MAP_SHARED))
    {
        if (mmap( (char *)view->base + start, size, prot, flags, fd, offset ) != (void *)-1)
            goto done;

        /* decide whether the failure allows a read() fallback */
        switch (errno)
        {
        case EINVAL:  /* file offset is not page-aligned, fall back to read() */
            if (flags & MAP_SHARED) return STATUS_INVALID_PARAMETER;
            break;
        case ENOEXEC:
        case ENODEV:  /* filesystem doesn't support mmap(), fall back to read() */
            if (vprot & VPROT_WRITE)
            {
                ERR( "shared writable mmap not supported, broken filesystem?\n" );
                return STATUS_NOT_SUPPORTED;
            }
            break;
        case EACCES:
        case EPERM:  /* noexec filesystem, fall back to read() */
            if (flags & MAP_SHARED)
            {
                if (prot & PROT_EXEC) ERR( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
                return STATUS_ACCESS_DENIED;
            }
            if (prot & PROT_EXEC) WARN( "failed to set PROT_EXEC on file map, noexec filesystem?\n" );
            break;
        default:
            return FILE_GetNtStatus();
        }
    }

    /* Reserve the memory with an anonymous mmap */
    ptr = wine_anon_mmap( (char *)view->base + start, size, PROT_READ | PROT_WRITE, MAP_FIXED );
    if (ptr == (void *)-1) return FILE_GetNtStatus();
    /* Now read in the file */
    pread( fd, ptr, size, offset );
    if (prot != (PROT_READ|PROT_WRITE)) mprotect( ptr, size, prot );  /* Set the right protection */
done:
    /* record the final protection bits in the per-page vprot table */
    set_page_vprot( (char *)view->base + start, size, vprot );
    return STATUS_SUCCESS;
}
|
|
|
|
|
|
|
|
|
2008-11-05 12:24:05 +01:00
|
|
|
/***********************************************************************
 *           get_committed_size
 *
 * Get the size of the committed range starting at base.
 * Also return the protections for the first page.
 *
 * For SEC_RESERVE mappings the commit state is authoritative on the
 * wineserver side, so it is queried there and cached into the local
 * per-page vprot bits; otherwise the local vprot table is scanned.
 */
static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
{
    SIZE_T i, start;

    start = ((char *)base - (char *)view->base) >> page_shift;
    *vprot = get_page_vprot( base );

    if (view->protect & SEC_RESERVE)
    {
        SIZE_T ret = 0;
        SERVER_START_REQ( get_mapping_committed_range )
        {
            req->base = wine_server_client_ptr( view->base );
            req->offset = start << page_shift;
            if (!wine_server_call( req ))
            {
                ret = reply->size;
                if (reply->committed)
                {
                    *vprot |= VPROT_COMMITTED;
                    /* cache the server's answer in the local vprot table */
                    set_page_vprot_bits( base, ret, VPROT_COMMITTED, 0 );
                }
            }
        }
        SERVER_END_REQ;
        return ret;
    }
    /* scan forward until the commit state differs from the first page's */
    for (i = start + 1; i < view->size >> page_shift; i++)
        if ((*vprot ^ get_page_vprot( (char *)view->base + (i << page_shift) )) & VPROT_COMMITTED) break;
    return (i - start) << page_shift;
}
|
|
|
|
|
|
|
|
|
2004-05-21 22:58:44 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* decommit_view
|
|
|
|
*
|
|
|
|
* Decommit some pages of a given view.
|
|
|
|
* The csVirtual section must be held by caller.
|
|
|
|
*/
|
|
|
|
static NTSTATUS decommit_pages( struct file_view *view, size_t start, size_t size )
|
|
|
|
{
|
|
|
|
if (wine_anon_mmap( (char *)view->base + start, size, PROT_NONE, MAP_FIXED ) != (void *)-1)
|
|
|
|
{
|
2017-09-05 16:20:23 +02:00
|
|
|
set_page_vprot_bits( (char *)view->base + start, size, 0, VPROT_COMMITTED );
|
2004-05-21 22:58:44 +02:00
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
return FILE_GetNtStatus();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-11-18 20:11:49 +01:00
|
|
|
/***********************************************************************
 *           allocate_dos_memory
 *
 * Allocate the DOS memory range.
 *
 * Tries to claim the whole legacy DOS range (0 - 0x110000); when the low
 * addresses cannot be obtained (e.g. the kernel forbids mapping near
 * NULL), falls back to an ordinary map_view allocation elsewhere, or to
 * a view that simply omits the unobtainable low pages.
 */
static NTSTATUS allocate_dos_memory( struct file_view **view, unsigned int vprot )
{
    size_t size;
    void *addr = NULL;
    void * const low_64k = (void *)0x10000;
    const size_t dosmem_size = 0x110000;
    int unix_prot = VIRTUAL_GetUnixProt( vprot );

    /* check for existing view */

    if (find_view_range( 0, dosmem_size )) return STATUS_CONFLICTING_ADDRESSES;

    /* check without the first 64K */

    if (unix_funcs->mmap_is_in_reserved_area( low_64k, dosmem_size - 0x10000 ) != 1)
    {
        addr = wine_anon_mmap( low_64k, dosmem_size - 0x10000, unix_prot, 0 );
        if (addr != low_64k)
        {
            /* could not get the DOS range at its fixed address: allocate anywhere */
            if (addr != (void *)-1) munmap( addr, dosmem_size - 0x10000 );
            return map_view( view, NULL, dosmem_size, FALSE, vprot, 0 );
        }
    }

    /* now try to allocate the low 64K too */

    if (unix_funcs->mmap_is_in_reserved_area( NULL, 0x10000 ) != 1)
    {
        /* page 0 is mapped separately since mmap(NULL, ...) would mean "anywhere" */
        addr = wine_anon_mmap( (void *)page_size, 0x10000 - page_size, unix_prot, 0 );
        if (addr == (void *)page_size)
        {
            if (!wine_anon_mmap( NULL, page_size, unix_prot, MAP_FIXED ))
            {
                addr = NULL;  /* the view will start at address 0 */
                TRACE( "successfully mapped low 64K range\n" );
            }
            else TRACE( "failed to map page 0\n" );
        }
        else
        {
            /* low 64K unavailable: start the view at 0x10000 instead */
            if (addr != (void *)-1) munmap( addr, 0x10000 - page_size );
            addr = low_64k;
            TRACE( "failed to map low 64K range\n" );
        }
    }

    /* now reserve the whole range */

    /* note: dosmem_size is cast to a pointer here purely to compute
     * the byte distance 0x110000 - (ULONG_PTR)addr */
    size = (char *)dosmem_size - (char *)addr;
    wine_anon_mmap( addr, size, unix_prot, MAP_FIXED );
    return create_view( view, addr, size, vprot );
}
|
|
|
|
|
|
|
|
|
2017-11-16 20:08:11 +01:00
|
|
|
/***********************************************************************
 *           map_pe_header
 *
 * Map the header of a PE file into memory.
 *
 * Prefers a private writable+executable file mapping; on filesystems
 * that refuse it (noexec, no mmap support) falls back to reading the
 * header into anonymous memory and flags the mapping as 'removable' so
 * later sections take the read() path too.
 */
static NTSTATUS map_pe_header( void *ptr, size_t size, int fd, BOOL *removable )
{
    if (!size) return STATUS_INVALID_IMAGE_FORMAT;

    if (!*removable)
    {
        if (mmap( ptr, size, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_FIXED|MAP_PRIVATE, fd, 0 ) != (void *)-1)
            return STATUS_SUCCESS;

        /* decide whether this failure permits a read() fallback */
        switch (errno)
        {
        case EPERM:
        case EACCES:
            WARN( "noexec file system, falling back to read\n" );
            break;
        case ENOEXEC:
        case ENODEV:
            WARN( "file system doesn't support mmap, falling back to read\n" );
            break;
        default:
            return FILE_GetNtStatus();
        }
        *removable = TRUE;
    }
    pread( fd, ptr, size, 0 );
    return STATUS_SUCCESS;  /* page protections will be updated later */
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           map_image
 *
 * Map an executable (PE format) image into memory.
 *
 * Reserves the whole image range (preferring the image's base address),
 * maps the PE header, then maps every section at its virtual address —
 * shared writable sections from the shared mapping fd, everything else
 * copy-on-write from the image file — applies per-section protections,
 * and registers the view with the wineserver.
 */
static NTSTATUS map_image( HANDLE hmapping, ACCESS_MASK access, int fd, int top_down, unsigned short zero_bits_64,
                           pe_image_info_t *image_info, int shared_fd, BOOL removable, PVOID *addr_ptr )
{
    IMAGE_DOS_HEADER *dos;
    IMAGE_NT_HEADERS *nt;
    IMAGE_SECTION_HEADER sections[96];
    IMAGE_SECTION_HEADER *sec;
    IMAGE_DATA_DIRECTORY *imports;
    NTSTATUS status = STATUS_CONFLICTING_ADDRESSES;
    SIZE_T header_size, total_size = image_info->map_size;
    int i;
    off_t pos;
    sigset_t sigset;
    struct stat st;
    struct file_view *view = NULL;
    char *ptr, *header_end, *header_start;
    char *base = wine_server_get_ptr( image_info->base );

    /* map_size is 64-bit; the SIZE_T copy detects truncation on 32-bit builds */
    if (total_size != image_info->map_size)  /* truncated */
    {
        WARN( "Modules larger than 4Gb (%s) not supported\n", wine_dbgstr_longlong(image_info->map_size) );
        return STATUS_INVALID_PARAMETER;
    }
    /* ignore a preferred base that doesn't fit the current address width */
    if ((ULONG_PTR)base != image_info->base) base = NULL;

    /* zero-map the whole range */

    server_enter_uninterrupted_section( &csVirtual, &sigset );

    if (base >= (char *)address_space_start)  /* make sure the DOS area remains free */
        status = map_view( &view, base, total_size, top_down, SEC_IMAGE | SEC_FILE |
                           VPROT_COMMITTED | VPROT_READ | VPROT_EXEC | VPROT_WRITECOPY, zero_bits_64 );

    /* preferred base unavailable: relocate to any address */
    if (status != STATUS_SUCCESS)
        status = map_view( &view, NULL, total_size, top_down, SEC_IMAGE | SEC_FILE |
                           VPROT_COMMITTED | VPROT_READ | VPROT_EXEC | VPROT_WRITECOPY, zero_bits_64 );

    if (status != STATUS_SUCCESS) goto error;

    ptr = view->base;
    TRACE_(module)( "mapped PE file at %p-%p\n", ptr, ptr + total_size );

    /* map the header */

    if (fstat( fd, &st ) == -1)
    {
        status = FILE_GetNtStatus();
        goto error;
    }
    header_size = min( image_info->header_size, st.st_size );
    if ((status = map_pe_header( view->base, header_size, fd, &removable )) != STATUS_SUCCESS) goto error;

    status = STATUS_INVALID_IMAGE_FORMAT;  /* generic error */
    dos = (IMAGE_DOS_HEADER *)ptr;
    nt = (IMAGE_NT_HEADERS *)(ptr + dos->e_lfanew);
    header_end = ptr + ROUND_SIZE( 0, header_size );
    memset( ptr + header_size, 0, header_end - (ptr + header_size) );
    /* validate that the NT headers and section table lie within the mapped header */
    if ((char *)(nt + 1) > header_end) goto error;
    header_start = (char*)&nt->OptionalHeader+nt->FileHeader.SizeOfOptionalHeader;
    if (nt->FileHeader.NumberOfSections > ARRAY_SIZE( sections )) goto error;
    if (header_start + sizeof(*sections) * nt->FileHeader.NumberOfSections > header_end) goto error;
    /* Some applications (e.g. the Steam version of Borderlands) map over the top of the section headers,
     * copying the headers into local memory is necessary to properly load such applications. */
    memcpy(sections, header_start, sizeof(*sections) * nt->FileHeader.NumberOfSections);
    sec = sections;

    imports = nt->OptionalHeader.DataDirectory + IMAGE_DIRECTORY_ENTRY_IMPORT;
    if (!imports->Size || !imports->VirtualAddress) imports = NULL;

    /* check for non page-aligned binary */

    if (image_info->image_flags & IMAGE_FLAGS_ImageMappedFlat)
    {
        /* unaligned sections, this happens for native subsystem binaries */
        /* in that case Windows simply maps in the whole file */

        total_size = min( total_size, ROUND_SIZE( 0, st.st_size ));
        if (map_file_into_view( view, fd, 0, total_size, 0, VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS) goto error;

        /* check that all sections are loaded at the right offset */
        if (nt->OptionalHeader.FileAlignment != nt->OptionalHeader.SectionAlignment) goto error;
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            if (sec[i].VirtualAddress != sec[i].PointerToRawData)
                goto error;  /* Windows refuses to load in that case too */
        }

        /* set the image protections */
        VIRTUAL_SetProt( view, ptr, total_size,
                         VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );

        /* no relocations are performed on non page-aligned binaries */
        goto done;
    }

    /* map all the sections */

    for (i = pos = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        static const SIZE_T sector_align = 0x1ff;
        SIZE_T map_size, file_start, file_size, end;

        if (!sec->Misc.VirtualSize)
            map_size = ROUND_SIZE( 0, sec->SizeOfRawData );
        else
            map_size = ROUND_SIZE( 0, sec->Misc.VirtualSize );

        /* file positions are rounded to sector boundaries regardless of OptionalHeader.FileAlignment */
        file_start = sec->PointerToRawData & ~sector_align;
        file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
        if (file_size > map_size) file_size = map_size;

        /* a few sanity checks */
        end = sec->VirtualAddress + ROUND_SIZE( sec->VirtualAddress, map_size );
        if (sec->VirtualAddress > total_size || end > total_size || end < sec->VirtualAddress)
        {
            WARN_(module)( "Section %.8s too large (%x+%lx/%lx)\n",
                           sec->Name, sec->VirtualAddress, map_size, total_size );
            goto error;
        }

        if ((sec->Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec->Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            /* writable shared sections come from the separate shared mapping file,
             * laid out consecutively at offset 'pos' */
            TRACE_(module)( "mapping shared section %.8s at %p off %x (%x) size %lx (%lx) flags %x\n",
                            sec->Name, ptr + sec->VirtualAddress,
                            sec->PointerToRawData, (int)pos, file_size, map_size,
                            sec->Characteristics );
            if (map_file_into_view( view, shared_fd, sec->VirtualAddress, map_size, pos,
                                    VPROT_COMMITTED | VPROT_READ | VPROT_WRITE, FALSE ) != STATUS_SUCCESS)
            {
                ERR_(module)( "Could not map shared section %.8s\n", sec->Name );
                goto error;
            }

            /* check if the import directory falls inside this section */
            if (imports && imports->VirtualAddress >= sec->VirtualAddress &&
                imports->VirtualAddress < sec->VirtualAddress + map_size)
            {
                UINT_PTR base = imports->VirtualAddress & ~page_mask;
                UINT_PTR end = base + ROUND_SIZE( imports->VirtualAddress, imports->Size );
                if (end > sec->VirtualAddress + map_size) end = sec->VirtualAddress + map_size;
                /* remap the import directory copy-on-write so fixups stay process-local */
                if (end > base)
                    map_file_into_view( view, shared_fd, base, end - base,
                                        pos + (base - sec->VirtualAddress),
                                        VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY, FALSE );
            }
            pos += map_size;
            continue;
        }

        TRACE_(module)( "mapping section %.8s at %p off %x size %x virt %x flags %x\n",
                        sec->Name, ptr + sec->VirtualAddress,
                        sec->PointerToRawData, sec->SizeOfRawData,
                        sec->Misc.VirtualSize, sec->Characteristics );

        if (!sec->PointerToRawData || !file_size) continue;

        /* Note: if the section is not aligned properly map_file_into_view will magically
         * fall back to read(), so we don't need to check anything here.
         */
        end = file_start + file_size;
        if (sec->PointerToRawData >= st.st_size ||
            end > ((st.st_size + sector_align) & ~sector_align) ||
            end < file_start ||
            map_file_into_view( view, fd, sec->VirtualAddress, file_size, file_start,
                                VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY,
                                removable ) != STATUS_SUCCESS)
        {
            ERR_(module)( "Could not map section %.8s, file probably truncated\n", sec->Name );
            goto error;
        }

        /* zero the tail of the last partially-used page */
        if (file_size & page_mask)
        {
            end = ROUND_SIZE( 0, file_size );
            if (end > map_size) end = map_size;
            TRACE_(module)("clearing %p - %p\n",
                           ptr + sec->VirtualAddress + file_size,
                           ptr + sec->VirtualAddress + end );
            memset( ptr + sec->VirtualAddress + file_size, 0, end - file_size );
        }
    }

    /* set the image protections */

    VIRTUAL_SetProt( view, ptr, ROUND_SIZE( 0, header_size ), VPROT_COMMITTED | VPROT_READ );

    sec = sections;
    for (i = 0; i < nt->FileHeader.NumberOfSections; i++, sec++)
    {
        SIZE_T size;
        BYTE vprot = VPROT_COMMITTED;

        if (sec->Misc.VirtualSize)
            size = ROUND_SIZE( sec->VirtualAddress, sec->Misc.VirtualSize );
        else
            size = ROUND_SIZE( sec->VirtualAddress, sec->SizeOfRawData );

        if (sec->Characteristics & IMAGE_SCN_MEM_READ) vprot |= VPROT_READ;
        if (sec->Characteristics & IMAGE_SCN_MEM_WRITE) vprot |= VPROT_WRITECOPY;
        if (sec->Characteristics & IMAGE_SCN_MEM_EXECUTE) vprot |= VPROT_EXEC;

        /* Dumb game crack lets the AOEP point into a data section. Adjust. */
        if ((nt->OptionalHeader.AddressOfEntryPoint >= sec->VirtualAddress) &&
            (nt->OptionalHeader.AddressOfEntryPoint < sec->VirtualAddress + size))
            vprot |= VPROT_EXEC;

        if (!VIRTUAL_SetProt( view, ptr + sec->VirtualAddress, size, vprot ) && (vprot & VPROT_EXEC))
            ERR( "failed to set %08x protection on section %.8s, noexec filesystem?\n",
                 sec->Characteristics, sec->Name );
    }

done:
    /* register the completed view with the wineserver */
    SERVER_START_REQ( map_view )
    {
        req->mapping = wine_server_obj_handle( hmapping );
        req->access = access;
        req->base = wine_server_client_ptr( view->base );
        req->size = view->size;
        req->start = 0;
        status = wine_server_call( req );
    }
    SERVER_END_REQ;
    if (status) goto error;

    VIRTUAL_DEBUG_DUMP_VIEW( view );
    server_leave_uninterrupted_section( &csVirtual, &sigset );

    *addr_ptr = ptr;
#ifdef VALGRIND_LOAD_PDB_DEBUGINFO
    VALGRIND_LOAD_PDB_DEBUGINFO(fd, ptr, total_size, ptr - base);
#endif
    if (ptr != base) return STATUS_IMAGE_NOT_AT_BASE;
    return STATUS_SUCCESS;

error:
    if (view) delete_view( view );
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return status;
}
|
|
|
|
|
|
|
|
|
2018-01-30 14:16:23 +01:00
|
|
|
/***********************************************************************
 *             virtual_map_section
 *
 * Map a file section into memory.
 *
 * handle       - mapping object handle (wineserver handle)
 * addr_ptr     - in: requested base (or NULL); out: actual mapped base
 * zero_bits_64 - address-constraint bits forwarded to map_view()/map_image()
 * commit_size  - unused here; presumably only meaningful for SEC_RESERVE
 *                sections committed later — TODO confirm against callers
 * offset_ptr   - optional file offset of the view (NULL means 0)
 * size_ptr     - in: requested view size (0 = rest of file); out: actual size
 * alloc_type   - only MEM_TOP_DOWN is consulted here
 * protect      - PAGE_* protection requested for the view
 * image_info   - receives the server-side PE image info
 *
 * Returns an NTSTATUS; STATUS_IMAGE_NOT_AT_BASE may be propagated from
 * map_image() for relocated images.
 */
NTSTATUS virtual_map_section( HANDLE handle, PVOID *addr_ptr, unsigned short zero_bits_64, SIZE_T commit_size,
                              const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr, ULONG alloc_type,
                              ULONG protect, pe_image_info_t *image_info )
{
    NTSTATUS res;
    mem_size_t full_size;
    ACCESS_MASK access;
    SIZE_T size;
    int unix_handle = -1, needs_close;
    unsigned int vprot, sec_flags;
    struct file_view *view;
    HANDLE shared_file;
    LARGE_INTEGER offset;
    sigset_t sigset;

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    /* translate the page protection into the section access rights we
     * must request from the server */
    switch(protect)
    {
    case PAGE_NOACCESS:
    case PAGE_READONLY:
    case PAGE_WRITECOPY:
        access = SECTION_MAP_READ;
        break;
    case PAGE_READWRITE:
        access = SECTION_MAP_WRITE;
        break;
    case PAGE_EXECUTE:
    case PAGE_EXECUTE_READ:
    case PAGE_EXECUTE_WRITECOPY:
        access = SECTION_MAP_READ | SECTION_MAP_EXECUTE;
        break;
    case PAGE_EXECUTE_READWRITE:
        access = SECTION_MAP_WRITE | SECTION_MAP_EXECUTE;
        break;
    default:
        return STATUS_INVALID_PAGE_PROTECTION;
    }

    /* query the mapping: section flags, total size, image info and the
     * optional writable shared-sections file for PE images */
    SERVER_START_REQ( get_mapping_info )
    {
        req->handle = wine_server_obj_handle( handle );
        req->access = access;
        wine_server_set_reply( req, image_info, sizeof(*image_info) );
        res = wine_server_call( req );
        sec_flags = reply->flags;
        full_size = reply->size;
        shared_file = wine_server_ptr_handle( reply->shared_file );
    }
    SERVER_END_REQ;
    if (res) return res;

    if ((res = unix_funcs->server_get_unix_fd( handle, 0, &unix_handle, &needs_close, NULL, NULL ))) goto done;

    if (sec_flags & SEC_IMAGE)
    {
        /* PE image mapping: delegate the section-by-section work to map_image() */
        if (shared_file)
        {
            int shared_fd, shared_needs_close;

            /* NOTE(review): if this fd lookup fails, shared_file is not
             * close_handle()d on the done path — possible handle leak;
             * confirm against upstream before changing */
            if ((res = unix_funcs->server_get_unix_fd( shared_file, FILE_READ_DATA|FILE_WRITE_DATA,
                                                       &shared_fd, &shared_needs_close, NULL, NULL ))) goto done;
            res = map_image( handle, access, unix_handle, alloc_type & MEM_TOP_DOWN, zero_bits_64, image_info,
                             shared_fd, needs_close, addr_ptr );
            if (shared_needs_close) close( shared_fd );
            close_handle( shared_file );
        }
        else
        {
            res = map_image( handle, access, unix_handle, alloc_type & MEM_TOP_DOWN, zero_bits_64, image_info,
                             -1, needs_close, addr_ptr );
        }
        /* map_image() consumed the fd logically; close our duplicate here */
        if (needs_close) close( unix_handle );
        if (res >= 0) *size_ptr = image_info->map_size;
        return res;
    }

    /* plain data section: validate requested offset/size against the file */
    res = STATUS_INVALID_PARAMETER;
    if (offset.QuadPart >= full_size) goto done;
    if (*size_ptr)
    {
        size = *size_ptr;
        if (size > full_size - offset.QuadPart)
        {
            res = STATUS_INVALID_VIEW_SIZE;
            goto done;
        }
    }
    else
    {
        /* size 0 means "map to end of file"; detect SIZE_T truncation on
         * 32-bit platforms where mem_size_t is wider than SIZE_T */
        size = full_size - offset.QuadPart;
        if (size != full_size - offset.QuadPart)  /* truncated */
        {
            WARN( "Files larger than 4Gb (%s) not supported on this platform\n",
                  wine_dbgstr_longlong(full_size) );
            goto done;
        }
    }
    if (!(size = ROUND_SIZE( 0, size ))) goto done;  /* wrap-around */

    /* Reserve a properly aligned area */

    server_enter_uninterrupted_section( &csVirtual, &sigset );

    get_vprot_flags( protect, &vprot, sec_flags & SEC_IMAGE );
    vprot |= sec_flags;
    if (!(sec_flags & SEC_RESERVE)) vprot |= VPROT_COMMITTED;
    res = map_view( &view, *addr_ptr, size, alloc_type & MEM_TOP_DOWN, vprot, zero_bits_64 );
    if (res)
    {
        server_leave_uninterrupted_section( &csVirtual, &sigset );
        goto done;
    }

    /* Map the file */

    TRACE( "handle=%p size=%lx offset=%x%08x\n", handle, size, offset.u.HighPart, offset.u.LowPart );

    res = map_file_into_view( view, unix_handle, 0, size, offset.QuadPart, vprot, needs_close );
    if (res == STATUS_SUCCESS)
    {
        /* register the view with the server so other processes can see it */
        SERVER_START_REQ( map_view )
        {
            req->mapping = wine_server_obj_handle( handle );
            req->access = access;
            req->base = wine_server_client_ptr( view->base );
            req->size = size;
            req->start = offset.QuadPart;
            res = wine_server_call( req );
        }
        SERVER_END_REQ;
    }

    if (res == STATUS_SUCCESS)
    {
        *addr_ptr = view->base;
        *size_ptr = size;
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    else
    {
        ERR( "mapping %p %lx %x%08x failed\n", view->base, size, offset.u.HighPart, offset.u.LowPart );
        delete_view( view );
    }

    server_leave_uninterrupted_section( &csVirtual, &sigset );

done:
    if (needs_close) close( unix_handle );
    return res;
}
|
|
|
|
|
|
|
|
|
2017-09-05 16:09:51 +02:00
|
|
|
/* in/out parameter bundle for the alloc_virtual_heap() reserved-area callback */
struct alloc_virtual_heap
{
    void  *base;  /* out: address of the allocated block ((void *)-1 on failure) */
    size_t size;  /* in: number of bytes to allocate */
};
|
|
|
|
|
2020-05-17 11:04:47 +02:00
|
|
|
/* callback for mmap_enum_reserved_areas to allocate space for the virtual heap */
|
|
|
|
static int CDECL alloc_virtual_heap( void *base, SIZE_T size, void *arg )
|
2008-10-28 13:51:36 +01:00
|
|
|
{
|
2017-09-05 16:09:51 +02:00
|
|
|
struct alloc_virtual_heap *alloc = arg;
|
2008-10-31 13:00:59 +01:00
|
|
|
|
2008-11-07 11:04:07 +01:00
|
|
|
if (is_beyond_limit( base, size, address_space_limit )) address_space_limit = (char *)base + size;
|
2017-09-05 16:09:51 +02:00
|
|
|
if (size < alloc->size) return 0;
|
2010-12-16 20:02:25 +01:00
|
|
|
if (is_win64 && base < (void *)0x80000000) return 0;
|
2017-09-05 16:09:51 +02:00
|
|
|
alloc->base = wine_anon_mmap( (char *)base + size - alloc->size, alloc->size,
|
|
|
|
PROT_READ|PROT_WRITE, MAP_FIXED );
|
|
|
|
return (alloc->base != (void *)-1);
|
2008-10-28 13:51:36 +01:00
|
|
|
}
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           virtual_init
 *
 * One-time initialization of the virtual memory manager: page-size
 * constants, preloader reserve range, signal stack geometry, the view
 * block / free-range / page-protection tables, and the DOS area.
 */
void virtual_init(void)
{
    const char *preload;
    struct alloc_virtual_heap alloc_views;
    size_t size;

#if !defined(__i386__) && !defined(__x86_64__) && !defined(__arm__) && !defined(__aarch64__)
    /* on known architectures these constants are compile-time; elsewhere
     * query the page size at run time */
    page_size = sysconf( _SC_PAGESIZE );
    page_mask = page_size - 1;
    /* Make sure we have a power of 2 */
    assert( !(page_size & page_mask) );
    page_shift = 0;
    while ((1 << page_shift) != page_size) page_shift++;
#ifdef _WIN64
    /* 47-bit user address space, rounded down to a page boundary */
    address_space_limit = (void *)(((1UL << 47) - 1) & ~page_mask);
#else
    address_space_limit = (void *)~page_mask;
#endif
    user_space_limit = working_set_limit = address_space_limit;
#endif
    /* the preloader communicates the range it reserved via this env var,
     * formatted as "start-end" in hex */
    if ((preload = getenv("WINEPRELOADRESERVE")))
    {
        unsigned long start, end;
        if (sscanf( preload, "%lx-%lx", &start, &end ) == 2)
        {
            preload_reserve_start = (void *)start;
            preload_reserve_end = (void *)end;
            /* some apps start inside the DOS area */
            if (preload_reserve_start)
                address_space_start = min( address_space_start, preload_reserve_start );
        }
    }

    /* signal stack: TEB plus at least MINSIGSTKSZ (min 8K) of stack space */
    size = ROUND_SIZE( 0, sizeof(TEB) ) + max( MINSIGSTKSZ, 8192 );
    /* find the first power of two not smaller than size */
    signal_stack_align = page_shift;
    while ((1u << signal_stack_align) < size) signal_stack_align++;
    signal_stack_mask = (1 << signal_stack_align) - 1;
    signal_stack_size = (1 << signal_stack_align) - ROUND_SIZE( 0, sizeof(TEB) );

    /* try to find space in a reserved area for the views and pages protection table */
#ifdef _WIN64
    /* one vprot entry per group of pages covering the whole address space */
    pages_vprot_size = ((size_t)address_space_limit >> page_shift >> pages_vprot_shift) + 1;
    alloc_views.size = 2 * view_block_size + pages_vprot_size * sizeof(*pages_vprot);
#else
    /* 32-bit: one protection byte per page of the 4G address space */
    alloc_views.size = 2 * view_block_size + (1U << (32 - page_shift));
#endif
    if (unix_funcs->mmap_enum_reserved_areas( alloc_virtual_heap, &alloc_views, 1 ))
        unix_funcs->mmap_remove_reserved_area( alloc_views.base, alloc_views.size );
    else
        alloc_views.base = wine_anon_mmap( NULL, alloc_views.size, PROT_READ | PROT_WRITE, 0 );

    assert( alloc_views.base != (void *)-1 );
    /* carve the single allocation into: view block | free ranges | vprot table */
    view_block_start = alloc_views.base;
    view_block_end = view_block_start + view_block_size / sizeof(*view_block_start);
    free_ranges = (void *)((char *)alloc_views.base + view_block_size);
    pages_vprot = (void *)((char *)alloc_views.base + 2 * view_block_size);
    wine_rb_init( &views_tree, compare_view );

    /* seed the free-range sequence with the single range [0, ~0] */
    free_ranges[0].base = (void *)0;
    free_ranges[0].end = (void *)~0;
    free_ranges_end = free_ranges + 1;

    /* make the DOS area accessible (except the low 64K) to hide bugs in broken apps like Excel 2003 */
    size = (char *)address_space_start - (char *)0x10000;
    if (size && unix_funcs->mmap_is_in_reserved_area( (void*)0x10000, size ) == 1)
        wine_anon_mmap( (void *)0x10000, size, PROT_READ | PROT_WRITE, MAP_FIXED );
}
|
2002-09-17 20:54:42 +02:00
|
|
|
|
|
|
|
|
2008-11-03 13:23:48 +01:00
|
|
|
/***********************************************************************
 *             virtual_get_system_info
 *
 * Fill a SYSTEM_BASIC_INFORMATION structure from the process's memory
 * constants, using sysinfo() for the physical RAM size where available.
 */
void virtual_get_system_info( SYSTEM_BASIC_INFORMATION *info )
{
#ifdef HAVE_SYSINFO
    struct sysinfo sinfo;
#endif

    info->unknown = 0;
    info->KeMaximumIncrement = 0;  /* FIXME */
    info->PageSize = page_size;
    info->MmLowestPhysicalPage = 1;
    /* fallback when sysinfo() is unavailable or fails: pretend 2G of RAM */
    info->MmHighestPhysicalPage = 0x7fffffff / page_size;
#ifdef HAVE_SYSINFO
    if (!sysinfo(&sinfo))
    {
        /* totalram is in units of mem_unit bytes */
        ULONG64 total = (ULONG64)sinfo.totalram * sinfo.mem_unit;
        info->MmHighestPhysicalPage = max(1, total / page_size);
    }
#endif
    info->MmNumberOfPhysicalPages = info->MmHighestPhysicalPage - info->MmLowestPhysicalPage;
    info->AllocationGranularity = granularity_mask + 1;
    info->LowestUserAddress = (void *)0x10000;
    info->HighestUserAddress = (char *)user_space_limit - 1;
    info->ActiveProcessorsAffinityMask = get_system_affinity_mask();
    info->NumberOfProcessors = NtCurrentTeb()->Peb->NumberOfProcessors;
}
|
|
|
|
|
|
|
|
|
2008-11-14 17:40:54 +01:00
|
|
|
/***********************************************************************
 *           virtual_create_builtin_view
 *
 * Create a memory view for a builtin module that was loaded by the
 * host dynamic loader, and record per-section page protections so the
 * module looks like a normally mapped PE image.
 *
 * module - base address of the already-loaded builtin PE module
 */
NTSTATUS virtual_create_builtin_view( void *module )
{
    NTSTATUS status;
    sigset_t sigset;
    IMAGE_NT_HEADERS *nt = RtlImageNtHeader( module );
    SIZE_T size = nt->OptionalHeader.SizeOfImage;
    IMAGE_SECTION_HEADER *sec;
    struct file_view *view;
    void *base;
    int i;

    /* align the module extent to page boundaries */
    size = ROUND_SIZE( module, size );
    base = ROUND_ADDR( module, page_mask );
    server_enter_uninterrupted_section( &csVirtual, &sigset );
    status = create_view( &view, base, size, SEC_IMAGE | SEC_FILE | VPROT_SYSTEM |
                          VPROT_COMMITTED | VPROT_READ | VPROT_WRITECOPY | VPROT_EXEC );
    if (!status)
    {
        TRACE( "created %p-%p\n", base, (char *)base + size );

        /* The PE header is always read-only, no write, no execute. */
        set_page_vprot( base, page_size, VPROT_COMMITTED | VPROT_READ );

        /* section headers follow the optional header */
        sec = (IMAGE_SECTION_HEADER *)((char *)&nt->OptionalHeader + nt->FileHeader.SizeOfOptionalHeader);
        for (i = 0; i < nt->FileHeader.NumberOfSections; i++)
        {
            BYTE flags = VPROT_COMMITTED;

            /* translate PE section characteristics into view protection bits */
            if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) flags |= VPROT_EXEC;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_READ) flags |= VPROT_READ;
            if (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE) flags |= VPROT_WRITE;
            set_page_vprot( (char *)base + sec[i].VirtualAddress, sec[i].Misc.VirtualSize, flags );
        }
        VIRTUAL_DEBUG_DUMP_VIEW( view );
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return status;
}
|
|
|
|
|
|
|
|
|
2020-04-30 10:05:56 +02:00
|
|
|
/***********************************************************************
 *           virtual_alloc_first_teb
 *
 * Allocate the initial TEB and PEB for the first thread of the process.
 * Reserves a 32-slot TEB block; the first TEB goes in slot 30 and the
 * PEB is placed in the last page of the block, so the remaining slots
 * are handed out later by virtual_alloc_teb().
 *
 * Note: allocation failures are not checked here; presumably this runs
 * early enough that failure would be fatal anyway — TODO confirm.
 */
TEB *virtual_alloc_first_teb(void)
{
    TEB *teb;
    PEB *peb;
    SIZE_T peb_size = page_size;
    SIZE_T teb_size = signal_stack_mask + 1;  /* one slot: TEB + signal stack */
    SIZE_T total = 32 * teb_size;

    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&teb_block, 0, &total,
                             MEM_RESERVE | MEM_TOP_DOWN, PAGE_READWRITE );
    teb_block_pos = 30;
    teb = (TEB *)((char *)teb_block + 30 * teb_size);
    peb = (PEB *)((char *)teb_block + 32 * teb_size - peb_size);
    /* commit only the pages actually used by this TEB and the PEB */
    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&teb, 0, &teb_size, MEM_COMMIT, PAGE_READWRITE );
    NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&peb, 0, &peb_size, MEM_COMMIT, PAGE_READWRITE );

    teb->Peb = peb;
    teb->Tib.Self = &teb->Tib;
    teb->Tib.ExceptionList = (void *)~0ul;
    teb->Tib.StackBase = (void *)~0ul;
    teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer;
    teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer);
    use_locks = TRUE;  /* from now on the virtual lock must be taken */
    return teb;
}
|
|
|
|
|
|
|
|
|
2020-04-28 13:34:57 +02:00
|
|
|
/***********************************************************************
 *           virtual_alloc_teb
 *
 * Allocate a TEB for a new thread, either by popping one off the
 * free-TEB list or by committing the next slot of the current TEB
 * block (reserving a new 32-slot block when the current one is full).
 *
 * ret_teb - receives the initialized TEB on success
 */
NTSTATUS virtual_alloc_teb( TEB **ret_teb )
{
    sigset_t sigset;
    TEB *teb = NULL;
    NTSTATUS status = STATUS_SUCCESS;
    SIZE_T teb_size = signal_stack_mask + 1;

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    if (next_free_teb)
    {
        /* reuse a TEB from the free list; its first pointer-sized bytes
         * hold the link to the next free entry */
        teb = next_free_teb;
        next_free_teb = *(TEB **)teb;
        memset( teb, 0, sizeof(*teb) );
    }
    else
    {
        if (!teb_block_pos)
        {
            /* current block exhausted: reserve a fresh 32-slot block */
            void *addr = NULL;
            SIZE_T total = 32 * teb_size;

            if ((status = NtAllocateVirtualMemory( NtCurrentProcess(), &addr, 0, &total,
                                                   MEM_RESERVE, PAGE_READWRITE )))
            {
                server_leave_uninterrupted_section( &csVirtual, &sigset );
                return status;
            }
            teb_block = addr;
            teb_block_pos = 32;
        }
        /* slots are handed out from the top of the block downwards */
        teb = (TEB *)((char *)teb_block + --teb_block_pos * teb_size);
        NtAllocateVirtualMemory( NtCurrentProcess(), (void **)&teb, 0, &teb_size,
                                 MEM_COMMIT, PAGE_READWRITE );
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );

    *ret_teb = teb;
    teb->Peb = NtCurrentTeb()->Peb;
    teb->Tib.Self = &teb->Tib;
    teb->Tib.ExceptionList = (void *)~0UL;
    teb->StaticUnicodeString.Buffer = teb->StaticUnicodeBuffer;
    teb->StaticUnicodeString.MaximumLength = sizeof(teb->StaticUnicodeBuffer);
    if ((status = unix_funcs->alloc_thread( teb )))
    {
        /* platform-side init failed: return the TEB to the free list */
        server_enter_uninterrupted_section( &csVirtual, &sigset );
        *(TEB **)teb = next_free_teb;
        next_free_teb = teb;
        server_leave_uninterrupted_section( &csVirtual, &sigset );
    }
    return status;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           virtual_free_teb
 *
 * Free a thread's TEB: release its stack allocations and push the TEB
 * itself onto the free list for reuse by virtual_alloc_teb().
 */
void virtual_free_teb( TEB *teb )
{
    struct ntdll_thread_data *thread_data = (struct ntdll_thread_data *)&teb->GdiTebBatch;
    SIZE_T size;
    sigset_t sigset;

    unix_funcs->free_thread( teb );
    if (teb->DeallocationStack)
    {
        /* size 0 + MEM_RELEASE frees the entire original allocation */
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &teb->DeallocationStack, &size, MEM_RELEASE );
    }
    if (thread_data->start_stack)
    {
        size = 0;
        NtFreeVirtualMemory( GetCurrentProcess(), &thread_data->start_stack, &size, MEM_RELEASE );
    }

    /* link the TEB into the free list, reusing its first pointer-sized
     * bytes as the link field */
    server_enter_uninterrupted_section( &csVirtual, &sigset );
    *(TEB **)teb = next_free_teb;
    next_free_teb = teb;
    server_leave_uninterrupted_section( &csVirtual, &sigset );
}
|
|
|
|
|
|
|
|
|
2008-04-01 14:11:44 +02:00
|
|
|
/***********************************************************************
 *           virtual_alloc_thread_stack
 *
 * Allocate a thread stack view with a no-access page and a guard page
 * at the low end (the stack grows down towards them).
 *
 * stack        - receives base/limit/deallocation pointers of the stack
 * reserve_size - bytes to reserve (0 = image default SizeOfStackReserve)
 * commit_size  - bytes to commit (0 = image default SizeOfStackCommit)
 * pthread_size - if non-NULL, *pthread_size bytes are carved off the top
 *                into a separate view for the pthread start data, and
 *                *pthread_size is updated to the rounded amount
 */
NTSTATUS virtual_alloc_thread_stack( INITIAL_TEB *stack, SIZE_T reserve_size, SIZE_T commit_size, SIZE_T *pthread_size )
{
    struct file_view *view;
    NTSTATUS status;
    sigset_t sigset;
    SIZE_T size, extra_size = 0;

    if (!reserve_size || !commit_size)
    {
        /* fall back to the defaults recorded in the main image headers */
        IMAGE_NT_HEADERS *nt = RtlImageNtHeader( NtCurrentTeb()->Peb->ImageBaseAddress );
        if (!reserve_size) reserve_size = nt->OptionalHeader.SizeOfStackReserve;
        if (!commit_size) commit_size = nt->OptionalHeader.SizeOfStackCommit;
    }

    size = max( reserve_size, commit_size );
    if (size < 1024 * 1024) size = 1024 * 1024;  /* Xlib needs a large stack */
    size = (size + 0xffff) & ~0xffff;  /* round to 64K boundary */
    if (pthread_size) *pthread_size = extra_size = max( page_size, ROUND_SIZE( 0, *pthread_size ));

    server_enter_uninterrupted_section( &csVirtual, &sigset );

    if ((status = map_view( &view, NULL, size + extra_size, FALSE,
                            VPROT_READ | VPROT_WRITE | VPROT_COMMITTED, 0 )) != STATUS_SUCCESS)
        goto done;

#ifdef VALGRIND_STACK_REGISTER
    VALGRIND_STACK_REGISTER( view->base, (char *)view->base + view->size );
#endif

    /* setup no access guard page */
    set_page_vprot( view->base, page_size, VPROT_COMMITTED );
    set_page_vprot( (char *)view->base + page_size, page_size,
                    VPROT_READ | VPROT_WRITE | VPROT_COMMITTED | VPROT_GUARD );
    mprotect_range( view->base, 2 * page_size, 0, 0 );
    VIRTUAL_DEBUG_DUMP_VIEW( view );

    if (extra_size)
    {
        struct file_view *extra_view;

        /* shrink the first view and create a second one for the extra size */
        /* this allows the app to free the stack without freeing the thread start portion */
        view->size -= extra_size;
        status = create_view( &extra_view, (char *)view->base + view->size, extra_size,
                              VPROT_READ | VPROT_WRITE | VPROT_COMMITTED );
        if (status != STATUS_SUCCESS)
        {
            unmap_area( (char *)view->base + view->size, extra_size );
            delete_view( view );
            goto done;
        }
    }

    /* note: limit is lower than base since the stack grows down */
    stack->OldStackBase = 0;
    stack->OldStackLimit = 0;
    stack->DeallocationStack = view->base;
    stack->StackBase = (char *)view->base + view->size;
    stack->StackLimit = (char *)view->base + 2 * page_size;  /* skip no-access + guard pages */
done:
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return status;
}
|
|
|
|
|
|
|
|
|
2008-06-26 21:10:57 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* virtual_clear_thread_stack
|
|
|
|
*
|
|
|
|
* Clear the stack contents before calling the main entry point, some broken apps need that.
|
|
|
|
*/
|
2017-12-04 13:56:32 +01:00
|
|
|
void virtual_clear_thread_stack( void *stack_end )
|
2008-06-26 21:10:57 +02:00
|
|
|
{
|
|
|
|
void *stack = NtCurrentTeb()->Tib.StackLimit;
|
2017-12-04 13:56:32 +01:00
|
|
|
size_t size = (char *)stack_end - (char *)stack;
|
2008-06-26 21:10:57 +02:00
|
|
|
|
2017-12-04 13:56:32 +01:00
|
|
|
wine_anon_mmap( stack, size, PROT_READ | PROT_WRITE, MAP_FIXED );
|
|
|
|
if (force_exec_prot) mprotect( stack, size, PROT_READ | PROT_WRITE | PROT_EXEC );
|
2008-06-26 21:10:57 +02:00
|
|
|
}
|
|
|
|
|
2019-06-14 17:32:43 +02:00
|
|
|
/**********************************************************************
|
|
|
|
* RtlCreateUserStack (NTDLL.@)
|
|
|
|
*/
|
|
|
|
NTSTATUS WINAPI RtlCreateUserStack( SIZE_T commit, SIZE_T reserve, ULONG zero_bits,
|
|
|
|
SIZE_T commit_align, SIZE_T reserve_align, INITIAL_TEB *stack )
|
|
|
|
{
|
|
|
|
TRACE("commit %#lx, reserve %#lx, zero_bits %u, commit_align %#lx, reserve_align %#lx, stack %p\n",
|
|
|
|
commit, reserve, zero_bits, commit_align, reserve_align, stack);
|
|
|
|
|
|
|
|
if (!commit_align || !reserve_align)
|
|
|
|
return STATUS_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
if (!commit || !reserve)
|
|
|
|
{
|
|
|
|
IMAGE_NT_HEADERS *nt = RtlImageNtHeader( NtCurrentTeb()->Peb->ImageBaseAddress );
|
|
|
|
if (!reserve) reserve = nt->OptionalHeader.SizeOfStackReserve;
|
|
|
|
if (!commit) commit = nt->OptionalHeader.SizeOfStackCommit;
|
|
|
|
}
|
|
|
|
|
|
|
|
reserve = (reserve + reserve_align - 1) & ~(reserve_align - 1);
|
|
|
|
commit = (commit + commit_align - 1) & ~(commit_align - 1);
|
|
|
|
|
|
|
|
return virtual_alloc_thread_stack( stack, reserve, commit, NULL );
|
|
|
|
}
|
|
|
|
|
|
|
|
/**********************************************************************
|
|
|
|
* RtlFreeUserStack (NTDLL.@)
|
|
|
|
*/
|
|
|
|
void WINAPI RtlFreeUserStack( void *stack )
|
|
|
|
{
|
|
|
|
SIZE_T size = 0;
|
|
|
|
|
|
|
|
TRACE("stack %p\n", stack);
|
|
|
|
|
|
|
|
NtFreeVirtualMemory( NtCurrentProcess(), &stack, &size, MEM_RELEASE );
|
|
|
|
}
|
2008-06-26 21:10:57 +02:00
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *           virtual_handle_fault
 *
 * Handle a page fault on a page we track: fire guard pages and resolve
 * write-watch faults.
 *
 * addr            - faulting address
 * err             - fault flags; only EXCEPTION_WRITE_FAULT is examined
 * on_signal_stack - guard-page handling is skipped while already on the
 *                   signal stack
 *
 * Returns STATUS_SUCCESS when the fault was resolved and the access can
 * be retried, STATUS_GUARD_PAGE_VIOLATION for a consumed guard page, or
 * STATUS_ACCESS_VIOLATION otherwise.
 */
NTSTATUS virtual_handle_fault( LPCVOID addr, DWORD err, BOOL on_signal_stack )
{
    NTSTATUS ret = STATUS_ACCESS_VIOLATION;
    void *page = ROUND_ADDR( addr, page_mask );
    sigset_t sigset;
    BYTE vprot;

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    vprot = get_page_vprot( page );
    if (!on_signal_stack && (vprot & VPROT_GUARD))
    {
        /* guard pages fire once: clear the bit and report the violation */
        set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
        mprotect_range( page, page_size, 0, 0 );
        ret = STATUS_GUARD_PAGE_VIOLATION;
    }
    else if (err & EXCEPTION_WRITE_FAULT)
    {
        if (vprot & VPROT_WRITEWATCH)
        {
            /* record the write by clearing the watch bit, making the page writable */
            set_page_vprot_bits( page, page_size, 0, VPROT_WRITEWATCH );
            mprotect_range( page, page_size, 0, 0 );
        }
        /* ignore fault if page is writable now */
        if (VIRTUAL_GetUnixProt( get_page_vprot( page )) & PROT_WRITE)
        {
            if ((vprot & VPROT_WRITEWATCH) || is_write_watch_range( page, page_size ))
                ret = STATUS_SUCCESS;
        }
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return ret;
}
|
|
|
|
|
|
|
|
|
2017-09-22 14:56:56 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* check_write_access
|
|
|
|
*
|
|
|
|
* Check if the memory range is writable, temporarily disabling write watches if necessary.
|
|
|
|
*/
|
|
|
|
static NTSTATUS check_write_access( void *base, size_t size, BOOL *has_write_watch )
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
char *addr = ROUND_ADDR( base, page_mask );
|
|
|
|
|
|
|
|
size = ROUND_SIZE( base, size );
|
|
|
|
for (i = 0; i < size; i += page_size)
|
|
|
|
{
|
|
|
|
BYTE vprot = get_page_vprot( addr + i );
|
|
|
|
if (vprot & VPROT_WRITEWATCH) *has_write_watch = TRUE;
|
|
|
|
if (!(VIRTUAL_GetUnixProt( vprot & ~VPROT_WRITEWATCH ) & PROT_WRITE))
|
|
|
|
return STATUS_INVALID_USER_BUFFER;
|
|
|
|
}
|
|
|
|
if (*has_write_watch)
|
|
|
|
mprotect_range( addr, size, 0, VPROT_WRITEWATCH ); /* temporarily enable write access */
|
|
|
|
return STATUS_SUCCESS;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           virtual_locked_server_call
 *
 * Perform a server call whose reply is written into client memory,
 * holding the virtual lock so that write-watched reply buffers can be
 * serviced and their watches updated atomically.
 */
unsigned int virtual_locked_server_call( void *req_ptr )
{
    struct __server_request_info * const req = req_ptr;
    sigset_t sigset;
    void *addr = req->reply_data;
    data_size_t size = req->u.req.request_header.reply_size;
    BOOL has_write_watch = FALSE;
    unsigned int ret = STATUS_ACCESS_VIOLATION;

    /* no reply data -> no buffer to protect, use the plain call */
    if (!size) return wine_server_call( req_ptr );

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    if (!(ret = check_write_access( addr, size, &has_write_watch )))
    {
        /* call without re-taking the lock; we already hold csVirtual */
        ret = unix_funcs->server_call_unlocked( req );
        /* re-arm the watches, marking the bytes actually written by the reply */
        if (has_write_watch) update_write_watches( addr, size, wine_server_reply_size( req ));
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return ret;
}
|
|
|
|
|
|
|
|
|
2017-09-22 15:04:34 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* virtual_locked_read
|
|
|
|
*/
|
|
|
|
ssize_t virtual_locked_read( int fd, void *addr, size_t size )
|
|
|
|
{
|
|
|
|
sigset_t sigset;
|
|
|
|
BOOL has_write_watch = FALSE;
|
|
|
|
int err = EFAULT;
|
|
|
|
|
|
|
|
ssize_t ret = read( fd, addr, size );
|
|
|
|
if (ret != -1 || errno != EFAULT) return ret;
|
|
|
|
|
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
if (!check_write_access( addr, size, &has_write_watch ))
|
|
|
|
{
|
|
|
|
ret = read( fd, addr, size );
|
|
|
|
err = errno;
|
|
|
|
if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
|
|
|
|
}
|
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
errno = err;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* virtual_locked_pread
|
|
|
|
*/
|
|
|
|
ssize_t virtual_locked_pread( int fd, void *addr, size_t size, off_t offset )
|
|
|
|
{
|
|
|
|
sigset_t sigset;
|
|
|
|
BOOL has_write_watch = FALSE;
|
|
|
|
int err = EFAULT;
|
|
|
|
|
|
|
|
ssize_t ret = pread( fd, addr, size, offset );
|
|
|
|
if (ret != -1 || errno != EFAULT) return ret;
|
|
|
|
|
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
if (!check_write_access( addr, size, &has_write_watch ))
|
|
|
|
{
|
|
|
|
ret = pread( fd, addr, size, offset );
|
|
|
|
err = errno;
|
|
|
|
if (has_write_watch) update_write_watches( addr, size, max( 0, ret ));
|
|
|
|
}
|
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
errno = err;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           __wine_locked_recvmsg
 *
 * recvmsg() wrapper that retries an EFAULT failure after temporarily
 * lifting write watches on all of the iovec destination buffers.
 */
ssize_t CDECL __wine_locked_recvmsg( int fd, struct msghdr *hdr, int flags )
{
    sigset_t sigset;
    size_t i;
    BOOL has_write_watch = FALSE;
    int err = EFAULT;

    /* fast path: most buffers are not write-watched */
    ssize_t ret = recvmsg( fd, hdr, flags );
    if (ret != -1 || errno != EFAULT) return ret;

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    /* validate (and un-watch) each iovec; stop at the first bad one */
    for (i = 0; i < hdr->msg_iovlen; i++)
        if (check_write_access( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, &has_write_watch ))
            break;
    /* only retry if every buffer was writable */
    if (i == hdr->msg_iovlen)
    {
        ret = recvmsg( fd, hdr, flags );
        err = errno;
    }
    /* re-arm the watches on the iovecs processed so far (i counts down);
     * size 0 marks the whole range dirty since we don't know per-iovec fill */
    if (has_write_watch)
        while (i--) update_write_watches( hdr->msg_iov[i].iov_base, hdr->msg_iov[i].iov_len, 0 );

    server_leave_uninterrupted_section( &csVirtual, &sigset );
    errno = err;
    return ret;
}
|
|
|
|
|
2008-04-01 17:37:17 +02:00
|
|
|
|
2013-02-12 19:56:08 +01:00
|
|
|
/***********************************************************************
|
|
|
|
* virtual_is_valid_code_address
|
|
|
|
*/
|
|
|
|
BOOL virtual_is_valid_code_address( const void *addr, SIZE_T size )
|
|
|
|
{
|
|
|
|
struct file_view *view;
|
|
|
|
BOOL ret = FALSE;
|
|
|
|
sigset_t sigset;
|
|
|
|
|
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
if ((view = VIRTUAL_FindView( addr, size )))
|
|
|
|
ret = !(view->protect & VPROT_SYSTEM); /* system views are not visible to the app */
|
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           virtual_handle_stack_fault
 *
 * Handle an access fault inside the current thread stack.
 * Return 1 if safely handled, -1 if handled into the overflow space.
 * Called from inside a signal handler.
 */
int virtual_handle_stack_fault( void *addr )
{
    int ret = 0;

    /* fault must be inside the committed/reserved stack region of this thread */
    if ((char *)addr < (char *)NtCurrentTeb()->DeallocationStack) return 0;
    if ((char *)addr >= (char *)NtCurrentTeb()->Tib.StackBase) return 0;

    RtlEnterCriticalSection( &csVirtual );  /* no need for signal masking inside signal handler */
    if (get_page_vprot( addr ) & VPROT_GUARD)
    {
        /* reserved space at the stack bottom that must stay available for
         * stack-overflow handling (at least 1 or 2 pages depending on arch) */
        size_t guaranteed = max( NtCurrentTeb()->GuaranteedStackBytes, page_size * (is_win64 ? 2 : 1) );
        char *page = ROUND_ADDR( addr, page_mask );
        /* commit the faulting guard page for normal use */
        set_page_vprot_bits( page, page_size, 0, VPROT_GUARD );
        mprotect_range( page, page_size, 0, 0 );
        if (page >= (char *)NtCurrentTeb()->DeallocationStack + page_size + guaranteed)
        {
            /* enough room left: move the guard page one page further down */
            set_page_vprot_bits( page - page_size, page_size, VPROT_COMMITTED | VPROT_GUARD, 0 );
            mprotect_range( page - page_size, page_size, 0, 0 );
            ret = 1;
        }
        else /* inside guaranteed space -> overflow exception */
        {
            /* commit the whole guaranteed area (minus the bottom no-access page)
             * so the overflow handler itself has stack to run on */
            page = (char *)NtCurrentTeb()->DeallocationStack + page_size;
            set_page_vprot_bits( page, guaranteed, VPROT_COMMITTED, VPROT_GUARD );
            mprotect_range( page, guaranteed, 0, 0 );
            ret = -1;
        }
        /* StackLimit tracks the lowest currently-committed stack page */
        NtCurrentTeb()->Tib.StackLimit = page;
    }
    RtlLeaveCriticalSection( &csVirtual );
    return ret;
}
|
|
|
|
|
|
|
|
|
2009-01-14 20:17:52 +01:00
|
|
|
/***********************************************************************
|
|
|
|
* virtual_check_buffer_for_read
|
|
|
|
*
|
|
|
|
* Check if a memory buffer can be read, triggering page faults if needed for DIB section access.
|
|
|
|
*/
|
|
|
|
BOOL virtual_check_buffer_for_read( const void *ptr, SIZE_T size )
|
|
|
|
{
|
|
|
|
if (!size) return TRUE;
|
|
|
|
if (!ptr) return FALSE;
|
|
|
|
|
|
|
|
__TRY
|
|
|
|
{
|
|
|
|
volatile const char *p = ptr;
|
2011-10-28 17:04:33 +02:00
|
|
|
char dummy __attribute__((unused));
|
2009-01-14 20:17:52 +01:00
|
|
|
SIZE_T count = size;
|
|
|
|
|
|
|
|
while (count > page_size)
|
|
|
|
{
|
|
|
|
dummy = *p;
|
|
|
|
p += page_size;
|
|
|
|
count -= page_size;
|
|
|
|
}
|
|
|
|
dummy = p[0];
|
|
|
|
dummy = p[count - 1];
|
|
|
|
}
|
|
|
|
__EXCEPT_PAGE_FAULT
|
|
|
|
{
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
__ENDTRY
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-01-14 20:17:52 +01:00
|
|
|
/***********************************************************************
|
|
|
|
* virtual_check_buffer_for_write
|
|
|
|
*
|
|
|
|
* Check if a memory buffer can be written to, triggering page faults if needed for write watches.
|
|
|
|
*/
|
|
|
|
BOOL virtual_check_buffer_for_write( void *ptr, SIZE_T size )
|
|
|
|
{
|
|
|
|
if (!size) return TRUE;
|
|
|
|
if (!ptr) return FALSE;
|
|
|
|
|
|
|
|
__TRY
|
|
|
|
{
|
|
|
|
volatile char *p = ptr;
|
|
|
|
SIZE_T count = size;
|
|
|
|
|
|
|
|
while (count > page_size)
|
|
|
|
{
|
|
|
|
*p |= 0;
|
|
|
|
p += page_size;
|
|
|
|
count -= page_size;
|
|
|
|
}
|
|
|
|
p[0] |= 0;
|
|
|
|
p[count - 1] |= 0;
|
|
|
|
}
|
|
|
|
__EXCEPT_PAGE_FAULT
|
|
|
|
{
|
|
|
|
return FALSE;
|
|
|
|
}
|
|
|
|
__ENDTRY
|
|
|
|
return TRUE;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           virtual_uninterrupted_read_memory
 *
 * Similar to NtReadVirtualMemory, but without wineserver calls. Moreover
 * permissions are checked before accessing each page, to ensure that no
 * exceptions can happen.
 */
SIZE_T virtual_uninterrupted_read_memory( const void *addr, void *buffer, SIZE_T size )
{
    struct file_view *view;
    sigset_t sigset;
    SIZE_T bytes_read = 0;

    if (!size) return 0;

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    if ((view = VIRTUAL_FindView( addr, size )))
    {
        /* system views belong to Wine internals and must not be exposed */
        if (!(view->protect & VPROT_SYSTEM))
        {
            /* copy page by page, re-checking read permission before each page;
             * stop early at the first unreadable page */
            while (bytes_read < size && (VIRTUAL_GetUnixProt( get_page_vprot( addr )) & PROT_READ))
            {
                /* clamp to the end of the current page so the permission
                 * check above covers the entire chunk being copied */
                SIZE_T block_size = min( size - bytes_read, page_size - ((UINT_PTR)addr & page_mask) );
                memcpy( buffer, addr, block_size );

                addr = (const void *)((const char *)addr + block_size);
                buffer = (void *)((char *)buffer + block_size);
                bytes_read += block_size;
            }
        }
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return bytes_read;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* virtual_uninterrupted_write_memory
|
|
|
|
*
|
|
|
|
* Similar to NtWriteVirtualMemory, but without wineserver calls. Moreover
|
|
|
|
* permissions are checked before accessing each page, to ensure that no
|
|
|
|
* exceptions can happen.
|
|
|
|
*/
|
2017-09-06 17:16:25 +02:00
|
|
|
NTSTATUS virtual_uninterrupted_write_memory( void *addr, const void *buffer, SIZE_T size )
|
2014-10-14 06:22:55 +02:00
|
|
|
{
|
2017-09-22 14:56:56 +02:00
|
|
|
BOOL has_write_watch = FALSE;
|
2014-10-14 06:22:55 +02:00
|
|
|
sigset_t sigset;
|
2017-09-22 14:56:56 +02:00
|
|
|
NTSTATUS ret;
|
2014-10-14 06:22:55 +02:00
|
|
|
|
2017-09-06 17:16:25 +02:00
|
|
|
if (!size) return STATUS_SUCCESS;
|
2014-10-14 06:22:55 +02:00
|
|
|
|
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
2017-09-22 14:56:56 +02:00
|
|
|
if (!(ret = check_write_access( addr, size, &has_write_watch )))
|
2014-10-14 06:22:55 +02:00
|
|
|
{
|
2017-09-06 17:16:25 +02:00
|
|
|
memcpy( addr, buffer, size );
|
2017-09-22 14:56:56 +02:00
|
|
|
if (has_write_watch) update_write_watches( addr, size, size );
|
2014-10-14 06:22:55 +02:00
|
|
|
}
|
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
2017-09-06 17:16:25 +02:00
|
|
|
return ret;
|
2014-10-14 06:22:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *           VIRTUAL_SetForceExec
 *
 * Whether to force exec prot on all views.
 */
void VIRTUAL_SetForceExec( BOOL enable )
{
    struct file_view *view;
    sigset_t sigset;

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    if (!force_exec_prot != !enable)  /* change all existing views */
    {
        force_exec_prot = enable;

        /* re-apply unix protections so the new force-exec setting takes effect */
        WINE_RB_FOR_EACH_ENTRY( view, &views_tree, struct file_view, entry )
        {
            /* file mappings are always accessible */
            BYTE commit = is_view_valloc( view ) ? 0 : VPROT_COMMITTED;

            mprotect_range( view->base, view->size, commit, 0 );
        }
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
}
|
|
|
|
|
/* address range to be released; passed as the callback argument to
 * free_reserved_memory during reserved-area enumeration */
struct free_range
{
    char *base;   /* start of the range to free */
    char *limit;  /* end of the range (exclusive) */
};
|
|
|
|
|
2020-05-17 11:04:47 +02:00
|
|
|
/* free reserved areas above the limit; callback for mmap_enum_reserved_areas */
|
|
|
|
static int CDECL free_reserved_memory( void *base, SIZE_T size, void *arg )
|
2009-06-25 14:18:53 +02:00
|
|
|
{
|
|
|
|
struct free_range *range = arg;
|
|
|
|
|
|
|
|
if ((char *)base >= range->limit) return 0;
|
|
|
|
if ((char *)base + size <= range->base) return 0;
|
|
|
|
if ((char *)base < range->base)
|
|
|
|
{
|
|
|
|
size -= range->base - (char *)base;
|
|
|
|
base = range->base;
|
|
|
|
}
|
|
|
|
if ((char *)base + size > range->limit) size = range->limit - (char *)base;
|
|
|
|
remove_reserved_area( base, size );
|
|
|
|
return 1; /* stop enumeration since the list has changed */
|
|
|
|
}
|
2006-12-05 15:42:29 +01:00
|
|
|
|
/***********************************************************************
 *           virtual_release_address_space
 *
 * Release some address space once we have loaded and initialized the app.
 */
void virtual_release_address_space(void)
{
    struct free_range range;
    sigset_t sigset;

    /* on 64-bit there is plenty of address space; nothing to release */
    if (is_win64) return;

    server_enter_uninterrupted_section( &csVirtual, &sigset );

    /* first free everything above the 2GB boundary up to the user limit */
    range.base = (char *)0x82000000;
    range.limit = user_space_limit;

    if (range.limit > range.base)
    {
        /* enumeration restarts each time an area is removed, hence the loop */
        while (unix_funcs->mmap_enum_reserved_areas( free_reserved_memory, &range, 1 )) /* nothing */;
#ifdef __APPLE__
        /* On macOS, we still want to free some of low memory, for OpenGL resources */
        range.base = (char *)0x40000000;
#else
        range.base = NULL;
#endif
    }
    else
        range.base = (char *)0x20000000;

    if (range.base)
    {
        range.limit = (char *)0x7f000000;
        while (unix_funcs->mmap_enum_reserved_areas( free_reserved_memory, &range, 0 )) /* nothing */;
    }

    server_leave_uninterrupted_section( &csVirtual, &sigset );
}
|
|
|
|
|
|
|
|
|
2013-04-04 13:00:47 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* virtual_set_large_address_space
|
|
|
|
*
|
|
|
|
* Enable use of a large address space when allowed by the application.
|
|
|
|
*/
|
|
|
|
void virtual_set_large_address_space(void)
|
|
|
|
{
|
|
|
|
IMAGE_NT_HEADERS *nt = RtlImageNtHeader( NtCurrentTeb()->Peb->ImageBaseAddress );
|
|
|
|
|
|
|
|
if (!(nt->FileHeader.Characteristics & IMAGE_FILE_LARGE_ADDRESS_AWARE)) return;
|
|
|
|
/* no large address space on win9x */
|
|
|
|
if (NtCurrentTeb()->Peb->OSPlatformId != VER_PLATFORM_WIN32_NT) return;
|
|
|
|
|
|
|
|
user_space_limit = working_set_limit = address_space_limit;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *             NtAllocateVirtualMemory   (NTDLL.@)
 *             ZwAllocateVirtualMemory   (NTDLL.@)
 */
NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG_PTR zero_bits,
                                         SIZE_T *size_ptr, ULONG type, ULONG protect )
{
    void *base;
    unsigned int vprot;
    BOOL is_dos_memory = FALSE;
    struct file_view *view;
    sigset_t sigset;
    SIZE_T size = *size_ptr;
    NTSTATUS status = STATUS_SUCCESS;
    unsigned short zero_bits_64 = zero_bits_win_to_64( zero_bits );

    TRACE("%p %p %08lx %x %08x\n", process, *ret, size, type, protect );

    if (!size) return STATUS_INVALID_PARAMETER;
    /* zero_bits in 22..31 is invalid; values >= 32 are treated as an
     * address bit mask, which only makes sense on 64-bit / WoW64 */
    if (zero_bits > 21 && zero_bits < 32) return STATUS_INVALID_PARAMETER_3;
    if (!is_win64 && !is_wow64 && zero_bits >= 32) return STATUS_INVALID_PARAMETER_3;

    if (process != NtCurrentProcess())
    {
        /* cross-process allocation: forward the request as an APC
         * executed in the target process */
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_alloc.type = APC_VIRTUAL_ALLOC;
        call.virtual_alloc.addr = wine_server_client_ptr( *ret );
        call.virtual_alloc.size = *size_ptr;
        call.virtual_alloc.zero_bits = zero_bits;
        call.virtual_alloc.op_type = type;
        call.virtual_alloc.prot = protect;
        status = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_alloc.status == STATUS_SUCCESS)
        {
            *ret = wine_server_get_ptr( result.virtual_alloc.addr );
            *size_ptr = result.virtual_alloc.size;
        }
        return result.virtual_alloc.status;
    }

    /* Round parameters to a page boundary */

    if (is_beyond_limit( 0, size, working_set_limit )) return STATUS_WORKING_SET_LIMIT_RANGE;

    if (*ret)
    {
        if (type & MEM_RESERVE) /* Round down to 64k boundary */
            base = ROUND_ADDR( *ret, granularity_mask );
        else
            base = ROUND_ADDR( *ret, page_mask );
        size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;

        /* disallow low 64k, wrap-around and kernel space */
        if (((char *)base < (char *)0x10000) ||
            ((char *)base + size < (char *)base) ||
            is_beyond_limit( base, size, address_space_limit ))
        {
            /* address 1 is magic to mean DOS area */
            if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
            else return STATUS_INVALID_PARAMETER;
        }
    }
    else
    {
        base = NULL;
        size = (size + page_mask) & ~page_mask;
    }

    /* Compute the alloc type flags */

    if (!(type & (MEM_COMMIT | MEM_RESERVE | MEM_RESET)) ||
        (type & ~(MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET)))
    {
        WARN("called with wrong alloc type flags (%08x) !\n", type);
        return STATUS_INVALID_PARAMETER;
    }

    /* Reserve the memory */

    /* use_locks is FALSE only during early process startup, before threads exist */
    if (use_locks) server_enter_uninterrupted_section( &csVirtual, &sigset );

    if ((type & MEM_RESERVE) || !base)
    {
        /* reserving a new view (possibly committed at the same time) */
        if (!(status = get_vprot_flags( protect, &vprot, FALSE )))
        {
            if (type & MEM_COMMIT) vprot |= VPROT_COMMITTED;
            if (type & MEM_WRITE_WATCH) vprot |= VPROT_WRITEWATCH;
            if (protect & PAGE_NOCACHE) vprot |= SEC_NOCACHE;

            /* write-copy makes no sense for anonymous memory */
            if (vprot & VPROT_WRITECOPY) status = STATUS_INVALID_PAGE_PROTECTION;
            else if (is_dos_memory) status = allocate_dos_memory( &view, vprot );
            else status = map_view( &view, base, size, type & MEM_TOP_DOWN, vprot, zero_bits_64 );

            if (status == STATUS_SUCCESS) base = view->base;
        }
    }
    else if (type & MEM_RESET)
    {
        /* MEM_RESET: tell the kernel the pages may be discarded */
        if (!(view = VIRTUAL_FindView( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
        else madvise( base, size, MADV_DONTNEED );
    }
    else /* commit the pages */
    {
        if (!(view = VIRTUAL_FindView( base, size ))) status = STATUS_NOT_MAPPED_VIEW;
        else if (view->protect & SEC_FILE) status = STATUS_ALREADY_COMMITTED;
        else if (!(status = set_protection( view, base, size, protect )) && (view->protect & SEC_RESERVE))
        {
            /* SEC_RESERVE mappings track their committed ranges in the server */
            SERVER_START_REQ( add_mapping_committed_range )
            {
                req->base = wine_server_client_ptr( view->base );
                req->offset = (char *)base - (char *)view->base;
                req->size = size;
                wine_server_call( req );
            }
            SERVER_END_REQ;
        }
    }

    if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );

    if (use_locks) server_leave_uninterrupted_section( &csVirtual, &sigset );

    if (status == STATUS_SUCCESS)
    {
        *ret = base;
        *size_ptr = size;
    }
    return status;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* NtFreeVirtualMemory (NTDLL.@)
|
|
|
|
* ZwFreeVirtualMemory (NTDLL.@)
|
|
|
|
*/
|
2005-07-15 12:01:30 +02:00
|
|
|
NTSTATUS WINAPI NtFreeVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr, ULONG type )
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2010-10-19 15:34:36 +02:00
|
|
|
struct file_view *view;
|
2002-09-17 20:54:42 +02:00
|
|
|
char *base;
|
2007-01-12 20:15:52 +01:00
|
|
|
sigset_t sigset;
|
2004-04-06 22:16:51 +02:00
|
|
|
NTSTATUS status = STATUS_SUCCESS;
|
2002-09-17 20:54:42 +02:00
|
|
|
LPVOID addr = *addr_ptr;
|
2005-07-15 12:01:30 +02:00
|
|
|
SIZE_T size = *size_ptr;
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2006-10-16 13:49:06 +02:00
|
|
|
TRACE("%p %p %08lx %x\n", process, addr, size, type );
|
2004-09-22 06:03:10 +02:00
|
|
|
|
2007-01-15 22:27:40 +01:00
|
|
|
if (process != NtCurrentProcess())
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2007-01-15 22:27:40 +01:00
|
|
|
apc_call_t call;
|
|
|
|
apc_result_t result;
|
|
|
|
|
2007-12-11 15:07:42 +01:00
|
|
|
memset( &call, 0, sizeof(call) );
|
|
|
|
|
2007-01-15 22:27:40 +01:00
|
|
|
call.virtual_free.type = APC_VIRTUAL_FREE;
|
2008-12-30 15:05:38 +01:00
|
|
|
call.virtual_free.addr = wine_server_client_ptr( addr );
|
2007-01-15 22:27:40 +01:00
|
|
|
call.virtual_free.size = size;
|
|
|
|
call.virtual_free.op_type = type;
|
2020-06-01 13:40:25 +02:00
|
|
|
status = unix_funcs->server_queue_process_apc( process, &call, &result );
|
2007-01-15 22:27:40 +01:00
|
|
|
if (status != STATUS_SUCCESS) return status;
|
|
|
|
|
|
|
|
if (result.virtual_free.status == STATUS_SUCCESS)
|
|
|
|
{
|
2008-12-30 15:05:38 +01:00
|
|
|
*addr_ptr = wine_server_get_ptr( result.virtual_free.addr );
|
2007-01-15 22:27:40 +01:00
|
|
|
*size_ptr = result.virtual_free.size;
|
|
|
|
}
|
|
|
|
return result.virtual_free.status;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Fix the parameters */
|
|
|
|
|
|
|
|
size = ROUND_SIZE( addr, size );
|
|
|
|
base = ROUND_ADDR( addr, page_mask );
|
|
|
|
|
2006-12-01 11:38:52 +01:00
|
|
|
/* avoid freeing the DOS area when a broken app passes a NULL pointer */
|
2008-11-18 20:14:46 +01:00
|
|
|
if (!base) return STATUS_INVALID_PARAMETER;
|
2006-12-01 11:38:52 +01:00
|
|
|
|
2007-01-12 20:15:52 +01:00
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
2004-04-06 22:16:51 +02:00
|
|
|
|
2017-09-12 12:59:28 +02:00
|
|
|
if (!(view = VIRTUAL_FindView( base, size )) || !is_view_valloc( view ))
|
2004-04-06 22:16:51 +02:00
|
|
|
{
|
|
|
|
status = STATUS_INVALID_PARAMETER;
|
|
|
|
}
|
|
|
|
else if (type == MEM_RELEASE)
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2004-02-12 00:56:52 +01:00
|
|
|
/* Free the pages */
|
|
|
|
|
2004-04-06 22:16:51 +02:00
|
|
|
if (size || (base != view->base)) status = STATUS_INVALID_PARAMETER;
|
|
|
|
else
|
|
|
|
{
|
2004-05-25 03:29:24 +02:00
|
|
|
delete_view( view );
|
2004-04-06 22:16:51 +02:00
|
|
|
*addr_ptr = base;
|
|
|
|
*size_ptr = size;
|
|
|
|
}
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
2004-02-12 00:56:52 +01:00
|
|
|
else if (type == MEM_DECOMMIT)
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2004-05-21 22:58:44 +02:00
|
|
|
status = decommit_pages( view, base - (char *)view->base, size );
|
|
|
|
if (status == STATUS_SUCCESS)
|
2004-04-06 22:16:51 +02:00
|
|
|
{
|
|
|
|
*addr_ptr = base;
|
|
|
|
*size_ptr = size;
|
|
|
|
}
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
2004-02-12 00:56:52 +01:00
|
|
|
else
|
|
|
|
{
|
2006-10-16 13:49:06 +02:00
|
|
|
WARN("called with wrong free type flags (%08x) !\n", type);
|
2004-04-06 22:16:51 +02:00
|
|
|
status = STATUS_INVALID_PARAMETER;
|
2004-02-12 00:56:52 +01:00
|
|
|
}
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2007-01-12 20:15:52 +01:00
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
2004-04-06 22:16:51 +02:00
|
|
|
return status;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* NtProtectVirtualMemory (NTDLL.@)
|
|
|
|
* ZwProtectVirtualMemory (NTDLL.@)
|
|
|
|
*/
|
2019-12-04 12:02:12 +01:00
|
|
|
NTSTATUS WINAPI DECLSPEC_HOTPATCH NtProtectVirtualMemory( HANDLE process, PVOID *addr_ptr, SIZE_T *size_ptr,
|
|
|
|
ULONG new_prot, ULONG *old_prot )
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2010-10-19 15:34:36 +02:00
|
|
|
struct file_view *view;
|
2007-01-12 20:15:52 +01:00
|
|
|
sigset_t sigset;
|
2004-04-06 22:16:51 +02:00
|
|
|
NTSTATUS status = STATUS_SUCCESS;
|
2002-09-17 20:54:42 +02:00
|
|
|
char *base;
|
2008-11-05 12:24:05 +01:00
|
|
|
BYTE vprot;
|
2005-07-15 12:01:30 +02:00
|
|
|
SIZE_T size = *size_ptr;
|
2002-09-17 20:54:42 +02:00
|
|
|
LPVOID addr = *addr_ptr;
|
2017-09-12 10:57:07 +02:00
|
|
|
DWORD old;
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2006-10-16 13:49:06 +02:00
|
|
|
TRACE("%p %p %08lx %08x\n", process, addr, size, new_prot );
|
2004-09-22 06:03:10 +02:00
|
|
|
|
2015-05-02 18:46:38 +02:00
|
|
|
if (!old_prot)
|
|
|
|
return STATUS_ACCESS_VIOLATION;
|
|
|
|
|
2007-01-15 22:30:04 +01:00
|
|
|
if (process != NtCurrentProcess())
|
2002-09-17 20:54:42 +02:00
|
|
|
{
|
2007-01-15 22:30:04 +01:00
|
|
|
apc_call_t call;
|
|
|
|
apc_result_t result;
|
|
|
|
|
2007-12-11 15:07:42 +01:00
|
|
|
memset( &call, 0, sizeof(call) );
|
|
|
|
|
2007-01-15 22:30:04 +01:00
|
|
|
call.virtual_protect.type = APC_VIRTUAL_PROTECT;
|
2008-12-30 15:05:38 +01:00
|
|
|
call.virtual_protect.addr = wine_server_client_ptr( addr );
|
2007-01-15 22:30:04 +01:00
|
|
|
call.virtual_protect.size = size;
|
|
|
|
call.virtual_protect.prot = new_prot;
|
2020-06-01 13:40:25 +02:00
|
|
|
status = unix_funcs->server_queue_process_apc( process, &call, &result );
|
2007-01-15 22:30:04 +01:00
|
|
|
if (status != STATUS_SUCCESS) return status;
|
|
|
|
|
|
|
|
if (result.virtual_protect.status == STATUS_SUCCESS)
|
|
|
|
{
|
2008-12-30 15:05:38 +01:00
|
|
|
*addr_ptr = wine_server_get_ptr( result.virtual_protect.addr );
|
2007-01-15 22:30:04 +01:00
|
|
|
*size_ptr = result.virtual_protect.size;
|
2019-02-21 20:35:38 +01:00
|
|
|
*old_prot = result.virtual_protect.prot;
|
2007-01-15 22:30:04 +01:00
|
|
|
}
|
|
|
|
return result.virtual_protect.status;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Fix the parameters */
|
|
|
|
|
|
|
|
size = ROUND_SIZE( addr, size );
|
|
|
|
base = ROUND_ADDR( addr, page_mask );
|
|
|
|
|
2007-01-12 20:15:52 +01:00
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2011-12-16 07:44:35 +01:00
|
|
|
if ((view = VIRTUAL_FindView( base, size )))
|
2004-04-06 22:16:51 +02:00
|
|
|
{
|
|
|
|
/* Make sure all the pages are committed */
|
2008-11-05 12:24:05 +01:00
|
|
|
if (get_committed_size( view, base, &vprot ) >= size && (vprot & VPROT_COMMITTED))
|
2004-04-06 22:16:51 +02:00
|
|
|
{
|
2017-09-12 10:57:07 +02:00
|
|
|
old = VIRTUAL_GetWin32Prot( vprot, view->protect );
|
|
|
|
status = set_protection( view, base, size, new_prot );
|
2004-04-06 22:16:51 +02:00
|
|
|
}
|
2008-11-05 12:24:05 +01:00
|
|
|
else status = STATUS_NOT_COMMITTED;
|
2004-04-06 22:16:51 +02:00
|
|
|
}
|
2011-12-16 07:44:35 +01:00
|
|
|
else status = STATUS_INVALID_PARAMETER;
|
|
|
|
|
2017-09-11 19:51:43 +02:00
|
|
|
if (!status) VIRTUAL_DEBUG_DUMP_VIEW( view );
|
|
|
|
|
2007-01-12 20:15:52 +01:00
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2004-04-06 22:16:51 +02:00
|
|
|
if (status == STATUS_SUCCESS)
|
|
|
|
{
|
|
|
|
*addr_ptr = base;
|
|
|
|
*size_ptr = size;
|
2017-09-12 10:57:07 +02:00
|
|
|
*old_prot = old;
|
2004-04-06 22:16:51 +02:00
|
|
|
}
|
|
|
|
return status;
|
2002-09-17 20:54:42 +02:00
|
|
|
}
|
|
|
|
|
2008-11-06 15:00:58 +01:00
|
|
|
|
2020-05-17 11:04:47 +02:00
|
|
|
/* retrieve state for a free memory area; callback for mmap_enum_reserved_areas */
/* 'arg' is a MEMORY_BASIC_INFORMATION with BaseAddress/RegionSize pre-filled for
 * the queried range; the callback narrows it to reflect whether the range falls
 * inside or outside a Wine-reserved area.  Returns 0 to continue enumeration,
 * 1 to stop once the state has been determined. */
static int CDECL get_free_mem_state_callback( void *start, SIZE_T size, void *arg )
{
    MEMORY_BASIC_INFORMATION *info = arg;
    void *end = (char *)start + size;

    /* reserved area entirely below the queried region: not relevant */
    if ((char *)info->BaseAddress + info->RegionSize <= (char *)start) return 0;

    if (info->BaseAddress >= end)
    {
        /* reserved area entirely above: remember its end as a lower bound for
         * the allocation base and keep enumerating */
        if (info->AllocationBase < end) info->AllocationBase = end;
        return 0;
    }

    if (info->BaseAddress >= start || start <= address_space_start)
    {
        /* it's a real free area */
        info->State             = MEM_FREE;
        info->Protect           = PAGE_NOACCESS;
        info->AllocationBase    = 0;
        info->AllocationProtect = 0;
        info->Type              = 0;
        /* clip the region to the end of the reserved area */
        if ((char *)info->BaseAddress + info->RegionSize > (char *)end)
            info->RegionSize = (char *)end - (char *)info->BaseAddress;
    }
    else /* outside of the reserved area, pretend it's allocated */
    {
        info->RegionSize        = (char *)start - (char *)info->BaseAddress;
        info->State             = MEM_RESERVE;
        info->Protect           = PAGE_NOACCESS;
        info->AllocationProtect = PAGE_NOACCESS;
        info->Type              = MEM_PRIVATE;
    }
    return 1;
}
|
|
|
|
|
2020-01-24 21:19:37 +01:00
|
|
|
/* get basic information about a memory block */
/* Implements the MemoryBasicInformation class of NtQueryVirtualMemory.
 * For a remote process the query is forwarded to the wineserver as an APC;
 * for the current process the view tree is searched under csVirtual.
 * Returns STATUS_INFO_LENGTH_MISMATCH if 'len' is too small, or
 * STATUS_INVALID_PARAMETER for addresses beyond the working set limit. */
static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
                                       MEMORY_BASIC_INFORMATION *info,
                                       SIZE_T len, SIZE_T *res_len )
{
    struct file_view *view;
    char *base, *alloc_base = 0, *alloc_end = working_set_limit;
    struct wine_rb_entry *ptr;
    sigset_t sigset;

    if (len < sizeof(MEMORY_BASIC_INFORMATION))
        return STATUS_INFO_LENGTH_MISMATCH;

    if (process != NtCurrentProcess())
    {
        /* remote process: run the query inside the target via a server APC */
        NTSTATUS status;
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_query.type = APC_VIRTUAL_QUERY;
        call.virtual_query.addr = wine_server_client_ptr( addr );
        status = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_query.status == STATUS_SUCCESS)
        {
            info->BaseAddress       = wine_server_get_ptr( result.virtual_query.base );
            info->AllocationBase    = wine_server_get_ptr( result.virtual_query.alloc_base );
            info->RegionSize        = result.virtual_query.size;
            info->Protect           = result.virtual_query.prot;
            info->AllocationProtect = result.virtual_query.alloc_prot;
            /* server state/alloc_type are stored shifted down; restore the
             * MEM_* bit positions */
            info->State             = (DWORD)result.virtual_query.state << 12;
            info->Type              = (DWORD)result.virtual_query.alloc_type << 16;
            /* detect SIZE_T truncation of the 64-bit server size (32-bit client
             * querying a 64-bit region) */
            if (info->RegionSize != result.virtual_query.size)  /* truncated */
                return STATUS_INVALID_PARAMETER;  /* FIXME */
            if (res_len) *res_len = sizeof(*info);
        }
        return result.virtual_query.status;
    }

    base = ROUND_ADDR( addr, page_mask );

    if (is_beyond_limit( base, 1, working_set_limit )) return STATUS_INVALID_PARAMETER;

    /* Find the view containing the address */

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    /* binary search of the views rb-tree; track the nearest view boundaries on
     * each side so that a miss still yields the enclosing free range */
    ptr = views_tree.root;
    while (ptr)
    {
        view = WINE_RB_ENTRY_VALUE( ptr, struct file_view, entry );
        if ((char *)view->base > base)
        {
            alloc_end = view->base;
            ptr = ptr->left;
        }
        else if ((char *)view->base + view->size <= base)
        {
            alloc_base = (char *)view->base + view->size;
            ptr = ptr->right;
        }
        else
        {
            alloc_base = view->base;
            alloc_end = (char *)view->base + view->size;
            break;
        }
    }

    /* Fill the info structure */

    info->AllocationBase = alloc_base;
    info->BaseAddress    = base;
    info->RegionSize     = alloc_end - base;

    if (!ptr)  /* address not inside any view */
    {
        if (!unix_funcs->mmap_enum_reserved_areas( get_free_mem_state_callback, info, 0 ))
        {
            /* not in a reserved area at all, pretend it's allocated */
#ifdef __i386__
            if (base >= (char *)address_space_start)
            {
                info->State             = MEM_RESERVE;
                info->Protect           = PAGE_NOACCESS;
                info->AllocationProtect = PAGE_NOACCESS;
                info->Type              = MEM_PRIVATE;
            }
            else
#endif
            {
                info->State             = MEM_FREE;
                info->Protect           = PAGE_NOACCESS;
                info->AllocationBase    = 0;
                info->AllocationProtect = 0;
                info->Type              = 0;
            }
        }
    }
    else  /* address lies inside 'view' */
    {
        BYTE vprot;
        char *ptr;
        SIZE_T range_size = get_committed_size( view, base, &vprot );

        info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
        info->Protect = (vprot & VPROT_COMMITTED) ? VIRTUAL_GetWin32Prot( vprot, view->protect ) : 0;
        info->AllocationProtect = VIRTUAL_GetWin32Prot( view->protect, view->protect );
        if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
        else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
        else info->Type = MEM_PRIVATE;
        /* shrink the region to the run of pages with identical protection,
         * ignoring the transient write-watch bit */
        for (ptr = base; ptr < base + range_size; ptr += page_size)
            if ((get_page_vprot( ptr ) ^ vprot) & ~VPROT_WRITEWATCH) break;
        info->RegionSize = ptr - base;
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );

    if (res_len) *res_len = sizeof(*info);
    return STATUS_SUCCESS;
}
|
|
|
|
|
2020-01-24 21:19:38 +01:00
|
|
|
/* Implements the MemoryWorkingSetExInformation class of NtQueryVirtualMemory.
 * 'info' is an array of MEMORY_WORKING_SET_EX_INFORMATION entries with
 * VirtualAddress pre-filled by the caller; each entry's VirtualAttributes is
 * filled in from the view list and Linux /proc/self/pagemap.  Only supported
 * for the current process. */
static NTSTATUS get_working_set_ex( HANDLE process, LPCVOID addr,
                                    MEMORY_WORKING_SET_EX_INFORMATION *info,
                                    SIZE_T len, SIZE_T *res_len )
{
    FILE *f;
    MEMORY_WORKING_SET_EX_INFORMATION *p;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        FIXME( "(process=%p,addr=%p) Unimplemented information class: MemoryWorkingSetExInformation\n", process, addr );
        return STATUS_INVALID_INFO_CLASS;
    }

    /* pagemap may be unreadable (permissions, non-Linux); degrade gracefully */
    f = fopen( "/proc/self/pagemap", "rb" );
    if (!f)
    {
        static int once;
        if (!once++) WARN( "unable to open /proc/self/pagemap\n" );
    }

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    /* process as many whole entries as fit into 'len' bytes */
    for (p = info; (UINT_PTR)(p + 1) <= (UINT_PTR)info + len; p++)
    {
        BYTE vprot;
        UINT64 pagemap;
        struct file_view *view;

        memset( &p->VirtualAttributes, 0, sizeof(p->VirtualAttributes) );

        /* If we don't have pagemap information, default to invalid. */
        /* pagemap entries are one 64-bit word per page, indexed by
         * virtual address >> 12 (assumes 4K kernel pages) */
        if (!f || fseek( f, ((UINT_PTR)p->VirtualAddress >> 12) * sizeof(pagemap), SEEK_SET ) == -1 ||
            fread( &pagemap, sizeof(pagemap), 1, f ) != 1)
        {
            pagemap = 0;
        }

        if ((view = VIRTUAL_FindView( p->VirtualAddress, 0 )) &&
            get_committed_size( view, p->VirtualAddress, &vprot ) &&
            (vprot & VPROT_COMMITTED))
        {
            /* pagemap bit 63 = page present; bit 61 = file-backed/shared-anon
             * (see Linux Documentation/admin-guide/mm/pagemap.rst) */
            p->VirtualAttributes.Valid = !(vprot & VPROT_GUARD) && (vprot & 0x0f) && (pagemap >> 63);
            p->VirtualAttributes.Shared = !is_view_valloc( view ) && ((pagemap >> 61) & 1);
            if (p->VirtualAttributes.Shared && p->VirtualAttributes.Valid)
                p->VirtualAttributes.ShareCount = 1; /* FIXME */
            if (p->VirtualAttributes.Valid)
                p->VirtualAttributes.Win32Protection = VIRTUAL_GetWin32Prot( vprot, view->protect );
        }
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );

    if (f)
        fclose( f );
    if (res_len)
        *res_len = (UINT_PTR)p - (UINT_PTR)info;  /* bytes actually processed */
    return STATUS_SUCCESS;
}
|
2002-09-17 20:54:42 +02:00
|
|
|
|
2020-01-24 21:19:37 +01:00
|
|
|
/* expands to a switch case that logs a FIXME once per call and fails the
 * query for information classes we know about but don't implement */
#define UNIMPLEMENTED_INFO_CLASS(c) \
    case c: \
        FIXME("(process=%p,addr=%p) Unimplemented information class: " #c "\n", process, addr); \
        return STATUS_INVALID_INFO_CLASS

/***********************************************************************
 *             NtQueryVirtualMemory   (NTDLL.@)
 *             ZwQueryVirtualMemory   (NTDLL.@)
 *
 * Dispatcher: routes each supported information class to its helper.
 */
NTSTATUS WINAPI NtQueryVirtualMemory( HANDLE process, LPCVOID addr,
                                      MEMORY_INFORMATION_CLASS info_class,
                                      PVOID buffer, SIZE_T len, SIZE_T *res_len )
{
    TRACE("(%p, %p, info_class=%d, %p, %ld, %p)\n",
          process, addr, info_class, buffer, len, res_len);

    switch(info_class)
    {
        case MemoryBasicInformation:
            return get_basic_memory_info( process, addr, buffer, len, res_len );

        case MemoryWorkingSetExInformation:
            return get_working_set_ex( process, addr, buffer, len, res_len );

        UNIMPLEMENTED_INFO_CLASS(MemoryWorkingSetList);
        UNIMPLEMENTED_INFO_CLASS(MemorySectionName);
        UNIMPLEMENTED_INFO_CLASS(MemoryBasicVlmInformation);

        default:
            FIXME("(%p,%p,info_class=%d,%p,%ld,%p) Unknown information class\n",
                  process, addr, info_class, buffer, len, res_len);
            return STATUS_INVALID_INFO_CLASS;
    }
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *             NtLockVirtualMemory   (NTDLL.@)
 *             ZwLockVirtualMemory   (NTDLL.@)
 *
 * Lock a range of virtual memory into physical memory.  On return *addr
 * and *size are rounded to page boundaries.  The 'unknown' flags argument
 * is ignored.  Remote processes are handled via a server APC.
 */
NTSTATUS WINAPI NtLockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    NTSTATUS status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_lock.type = APC_VIRTUAL_LOCK;
        call.virtual_lock.addr = wine_server_client_ptr( *addr );
        call.virtual_lock.size = *size;
        status = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_lock.status == STATUS_SUCCESS)
        {
            /* report the page-rounded range actually locked in the target */
            *addr = wine_server_get_ptr( result.virtual_lock.addr );
            *size = result.virtual_lock.size;
        }
        return result.virtual_lock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    /* any mlock() failure is reported as access denied */
    if (mlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *             NtUnlockVirtualMemory   (NTDLL.@)
 *             ZwUnlockVirtualMemory   (NTDLL.@)
 *
 * Counterpart of NtLockVirtualMemory: unlock a page-rounded range.
 * The 'unknown' flags argument is ignored.  Remote processes are handled
 * via a server APC.
 */
NTSTATUS WINAPI NtUnlockVirtualMemory( HANDLE process, PVOID *addr, SIZE_T *size, ULONG unknown )
{
    NTSTATUS status = STATUS_SUCCESS;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_unlock.type = APC_VIRTUAL_UNLOCK;
        call.virtual_unlock.addr = wine_server_client_ptr( *addr );
        call.virtual_unlock.size = *size;
        status = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_unlock.status == STATUS_SUCCESS)
        {
            /* report the page-rounded range actually unlocked in the target */
            *addr = wine_server_get_ptr( result.virtual_unlock.addr );
            *size = result.virtual_unlock.size;
        }
        return result.virtual_unlock.status;
    }

    *size = ROUND_SIZE( *addr, *size );
    *addr = ROUND_ADDR( *addr, page_mask );

    /* any munlock() failure is reported as access denied */
    if (munlock( *addr, *size )) status = STATUS_ACCESS_DENIED;
    return status;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *             NtCreateSection   (NTDLL.@)
 *             ZwCreateSection   (NTDLL.@)
 *
 * Create a section (file mapping) object in the wineserver.
 * 'protect' is validated/translated to VPROT_* flags first; the derived
 * file access is what the server will require on 'file'.  On success
 * *handle receives the new section handle.
 */
NTSTATUS WINAPI NtCreateSection( HANDLE *handle, ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr,
                                 const LARGE_INTEGER *size, ULONG protect,
                                 ULONG sec_flags, HANDLE file )
{
    NTSTATUS ret;
    unsigned int vprot, file_access = 0;
    data_size_t len;
    struct object_attributes *objattr;

    /* validate the page protection (image mappings allow extra values) */
    if ((ret = get_vprot_flags( protect, &vprot, sec_flags & SEC_IMAGE ))) return ret;
    /* marshal the object attributes for the server request */
    if ((ret = alloc_object_attributes( attr, &objattr, &len ))) return ret;

    if (vprot & VPROT_READ) file_access |= FILE_READ_DATA;
    if (vprot & VPROT_WRITE) file_access |= FILE_WRITE_DATA;

    SERVER_START_REQ( create_mapping )
    {
        req->access      = access;
        req->flags       = sec_flags;
        req->file_handle = wine_server_obj_handle( file );
        req->file_access = file_access;
        req->size        = size ? size->QuadPart : 0;  /* 0 = use file size */
        wine_server_add_data( req, objattr, len );
        ret = wine_server_call( req );
        *handle = wine_server_ptr_handle( reply->handle );
    }
    SERVER_END_REQ;

    RtlFreeHeap( GetProcessHeap(), 0, objattr );
    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *             NtOpenSection   (NTDLL.@)
 *             ZwOpenSection   (NTDLL.@)
 *
 * Open an existing named section object.  The name lookup is performed
 * by the wineserver; on success *handle receives the opened handle.
 */
NTSTATUS WINAPI NtOpenSection( HANDLE *handle, ACCESS_MASK access, const OBJECT_ATTRIBUTES *attr )
{
    NTSTATUS ret;

    /* reject malformed OBJECT_ATTRIBUTES before talking to the server */
    if ((ret = validate_open_object_attributes( attr ))) return ret;

    SERVER_START_REQ( open_mapping )
    {
        req->access     = access;
        req->attributes = attr->Attributes;
        req->rootdir    = wine_server_obj_handle( attr->RootDirectory );
        if (attr->ObjectName)
            wine_server_add_data( req, attr->ObjectName->Buffer, attr->ObjectName->Length );
        ret = wine_server_call( req );
        *handle = wine_server_ptr_handle( reply->handle );
    }
    SERVER_END_REQ;
    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *             NtMapViewOfSection   (NTDLL.@)
 *             ZwMapViewOfSection   (NTDLL.@)
 *
 * Map a view of a section into a process address space.  Validates the
 * zero_bits constraints and alignment, forwards remote-process requests
 * as a server APC, and otherwise delegates to virtual_map_section().
 * On success *addr_ptr/*size_ptr receive the actual base and size.
 */
NTSTATUS WINAPI NtMapViewOfSection( HANDLE handle, HANDLE process, PVOID *addr_ptr, ULONG_PTR zero_bits,
                                    SIZE_T commit_size, const LARGE_INTEGER *offset_ptr, SIZE_T *size_ptr,
                                    SECTION_INHERIT inherit, ULONG alloc_type, ULONG protect )
{
    NTSTATUS res;
    SIZE_T mask = granularity_mask;
    pe_image_info_t image_info;  /* filled by virtual_map_section for image sections */
    LARGE_INTEGER offset;
    unsigned short zero_bits_64 = zero_bits_win_to_64( zero_bits );

    offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;

    TRACE("handle=%p process=%p addr=%p off=%x%08x size=%lx access=%x\n",
          handle, process, *addr_ptr, offset.u.HighPart, offset.u.LowPart, *size_ptr, protect );

    /* Check parameters */
    /* zero_bits < 32 is a bit count (max 21); zero_bits >= 32 is treated as an
     * address bitmask, which only makes sense on 64-bit / WoW64 */
    if (zero_bits > 21 && zero_bits < 32)
        return STATUS_INVALID_PARAMETER_4;
    if (!is_win64 && !is_wow64 && zero_bits >= 32)
        return STATUS_INVALID_PARAMETER_4;

    /* If both addr_ptr and zero_bits are passed, they have to match */
    if (*addr_ptr && zero_bits && zero_bits < 32 &&
        (((UINT_PTR)*addr_ptr) >> (32 - zero_bits)))
        return STATUS_INVALID_PARAMETER_4;
    if (*addr_ptr && zero_bits >= 32 &&
        (((UINT_PTR)*addr_ptr) & ~zero_bits))
        return STATUS_INVALID_PARAMETER_4;

#ifndef _WIN64
    /* AT_ROUND_TO_PAGE relaxes the alignment requirement from allocation
     * granularity to page size (32-bit processes only) */
    if (!is_wow64 && (alloc_type & AT_ROUND_TO_PAGE))
    {
        *addr_ptr = ROUND_ADDR( *addr_ptr, page_mask );
        mask = page_mask;
    }
#endif

    if ((offset.u.LowPart & mask) || (*addr_ptr && ((UINT_PTR)*addr_ptr & mask)))
        return STATUS_MAPPED_ALIGNMENT;

    if (process != NtCurrentProcess())
    {
        /* remote process: perform the mapping inside the target via an APC */
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.map_view.type        = APC_MAP_VIEW;
        call.map_view.handle      = wine_server_obj_handle( handle );
        call.map_view.addr        = wine_server_client_ptr( *addr_ptr );
        call.map_view.size        = *size_ptr;
        call.map_view.offset      = offset.QuadPart;
        call.map_view.zero_bits   = zero_bits;
        call.map_view.alloc_type  = alloc_type;
        call.map_view.prot        = protect;
        res = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (res != STATUS_SUCCESS) return res;

        /* success codes include informational statuses like
         * STATUS_IMAGE_NOT_AT_BASE, hence >= 0 rather than == 0 */
        if ((NTSTATUS)result.map_view.status >= 0)
        {
            *addr_ptr = wine_server_get_ptr( result.map_view.addr );
            *size_ptr = result.map_view.size;
        }
        return result.map_view.status;
    }

    return virtual_map_section( handle, addr_ptr, zero_bits_64, commit_size,
                                offset_ptr, size_ptr, alloc_type, protect,
                                &image_info );
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
 *             NtUnmapViewOfSection   (NTDLL.@)
 *             ZwUnmapViewOfSection   (NTDLL.@)
 *
 * Unmap a previously mapped section view.  Anonymous (VirtualAlloc) views
 * are rejected with STATUS_NOT_MAPPED_VIEW.  Remote processes are handled
 * via a server APC.
 */
NTSTATUS WINAPI NtUnmapViewOfSection( HANDLE process, PVOID addr )
{
    struct file_view *view;
    NTSTATUS status = STATUS_NOT_MAPPED_VIEW;
    sigset_t sigset;

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.unmap_view.type = APC_UNMAP_VIEW;
        call.unmap_view.addr = wine_server_client_ptr( addr );
        status = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (status == STATUS_SUCCESS) status = result.unmap_view.status;
        return status;
    }

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    if ((view = VIRTUAL_FindView( addr, 0 )) && !is_view_valloc( view ))
    {
        if (!(view->protect & VPROT_SYSTEM))
        {
            /* regular view: tell the server first so it can release the
             * mapping object, then drop our bookkeeping */
            SERVER_START_REQ( unmap_view )
            {
                req->base = wine_server_client_ptr( view->base );
                status = wine_server_call( req );
            }
            SERVER_END_REQ;
            if (!status) delete_view( view );
            else FIXME( "failed to unmap %p %x\n", view->base, status );
        }
        else
        {
            /* system views (e.g. builtin dlls) are not known to the server */
            delete_view( view );
            status = STATUS_SUCCESS;
        }
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return status;
}
|
|
|
|
|
|
|
|
|
2018-10-24 12:32:30 +02:00
|
|
|
/******************************************************************************
 *             virtual_fill_image_information
 *
 * Helper for NtQuerySection.
 * Copies the server-side PE image description into the Win32
 * SECTION_IMAGE_INFORMATION layout expected by callers.
 */
void virtual_fill_image_information( const pe_image_info_t *pe_info, SECTION_IMAGE_INFORMATION *info )
{
    info->TransferAddress      = wine_server_get_ptr( pe_info->entry_point );
    info->ZeroBits             = pe_info->zerobits;
    info->MaximumStackSize     = pe_info->stack_size;
    info->CommittedStackSize   = pe_info->stack_commit;
    info->SubSystemType        = pe_info->subsystem;
    info->SubsystemVersionLow  = pe_info->subsystem_low;
    info->SubsystemVersionHigh = pe_info->subsystem_high;
    info->GpValue              = pe_info->gp;
    info->ImageCharacteristics = pe_info->image_charact;
    info->DllCharacteristics   = pe_info->dll_charact;
    info->Machine              = pe_info->machine;
    info->ImageContainsCode    = pe_info->contains_code;
    /* strip Wine-internal flags; applications must not see them */
    info->u.ImageFlags         = pe_info->image_flags & ~(IMAGE_FLAGS_WineBuiltin|IMAGE_FLAGS_WineFakeDll);
    info->LoaderFlags          = pe_info->loader_flags;
    info->ImageFileSize        = pe_info->file_size;
    info->CheckSum             = pe_info->checksum;
#ifndef _WIN64 /* don't return 64-bit values to 32-bit processes */
    if (pe_info->machine == IMAGE_FILE_MACHINE_AMD64 || pe_info->machine == IMAGE_FILE_MACHINE_ARM64)
    {
        /* fixed placeholder values matching what Windows reports here */
        info->TransferAddress    = (void *)0x81231234;  /* sic */
        info->MaximumStackSize   = 0x100000;
        info->CommittedStackSize = 0x10000;
    }
#endif
}
|
|
|
|
|
2016-07-26 07:43:02 +02:00
|
|
|
/******************************************************************************
 *             NtQuerySection   (NTDLL.@)
 *             ZwQuerySection   (NTDLL.@)
 *
 * Query a section object.  Supports SectionBasicInformation and
 * SectionImageInformation (the latter only for SEC_IMAGE sections);
 * anything else returns STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS WINAPI NtQuerySection( HANDLE handle, SECTION_INFORMATION_CLASS class, void *ptr,
                                SIZE_T size, SIZE_T *ret_size )
{
    NTSTATUS status;
    pe_image_info_t image_info;

    /* validate the buffer size up front, per information class */
    switch (class)
    {
    case SectionBasicInformation:
        if (size < sizeof(SECTION_BASIC_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    case SectionImageInformation:
        if (size < sizeof(SECTION_IMAGE_INFORMATION)) return STATUS_INFO_LENGTH_MISMATCH;
        break;
    default:
        FIXME( "class %u not implemented\n", class );
        return STATUS_NOT_IMPLEMENTED;
    }
    if (!ptr) return STATUS_ACCESS_VIOLATION;

    SERVER_START_REQ( get_mapping_info )
    {
        req->handle = wine_server_obj_handle( handle );
        req->access = SECTION_QUERY;
        /* the reply data area carries the PE image info for image sections */
        wine_server_set_reply( req, &image_info, sizeof(image_info) );
        if (!(status = wine_server_call( req )))
        {
            if (class == SectionBasicInformation)
            {
                SECTION_BASIC_INFORMATION *info = ptr;
                info->Attributes    = reply->flags;
                info->BaseAddress   = NULL;
                info->Size.QuadPart = reply->size;
                if (ret_size) *ret_size = sizeof(*info);
            }
            else if (reply->flags & SEC_IMAGE)
            {
                SECTION_IMAGE_INFORMATION *info = ptr;
                virtual_fill_image_information( &image_info, info );
                if (ret_size) *ret_size = sizeof(*info);
            }
            else status = STATUS_SECTION_NOT_IMAGE;
        }
    }
    SERVER_END_REQ;

    return status;
}
|
|
|
|
|
|
|
|
|
2002-09-17 20:54:42 +02:00
|
|
|
/***********************************************************************
 *             NtFlushVirtualMemory   (NTDLL.@)
 *             ZwFlushVirtualMemory   (NTDLL.@)
 *
 * Flush a mapped view's dirty pages to the backing file (msync).
 * *addr_ptr is rounded down to a page boundary; a zero *size_ptr means
 * the whole view.  The 'unknown' flags argument is ignored.  Remote
 * processes are handled via a server APC.
 */
NTSTATUS WINAPI NtFlushVirtualMemory( HANDLE process, LPCVOID *addr_ptr,
                                      SIZE_T *size_ptr, ULONG unknown )
{
    struct file_view *view;
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;
    void *addr = ROUND_ADDR( *addr_ptr, page_mask );

    if (process != NtCurrentProcess())
    {
        apc_call_t call;
        apc_result_t result;

        memset( &call, 0, sizeof(call) );

        call.virtual_flush.type = APC_VIRTUAL_FLUSH;
        call.virtual_flush.addr = wine_server_client_ptr( addr );
        call.virtual_flush.size = *size_ptr;
        status = unix_funcs->server_queue_process_apc( process, &call, &result );
        if (status != STATUS_SUCCESS) return status;

        if (result.virtual_flush.status == STATUS_SUCCESS)
        {
            *addr_ptr = wine_server_get_ptr( result.virtual_flush.addr );
            *size_ptr = result.virtual_flush.size;
        }
        return result.virtual_flush.status;
    }

    server_enter_uninterrupted_section( &csVirtual, &sigset );
    if (!(view = VIRTUAL_FindView( addr, *size_ptr ))) status = STATUS_INVALID_PARAMETER;
    else
    {
        if (!*size_ptr) *size_ptr = view->size;
        *addr_ptr = addr;
#ifdef MS_ASYNC
        /* asynchronous flush; platforms without MS_ASYNC simply no-op */
        if (msync( addr, *size_ptr, MS_ASYNC )) status = STATUS_NOT_MAPPED_DATA;
#endif
    }
    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return status;
}
|
2003-07-08 23:18:45 +02:00
|
|
|
|
|
|
|
|
2008-11-14 10:49:02 +01:00
|
|
|
/***********************************************************************
 *             NtGetWriteWatch   (NTDLL.@)
 *             ZwGetWriteWatch   (NTDLL.@)
 *
 * Retrieve the addresses of pages written to in a write-watch range.
 * On input *count is the capacity of 'addresses'; on output it is the
 * number of written pages reported.  With WRITE_WATCH_FLAG_RESET the
 * watch state of the scanned pages is cleared.
 */
NTSTATUS WINAPI NtGetWriteWatch( HANDLE process, ULONG flags, PVOID base, SIZE_T size, PVOID *addresses,
                                 ULONG_PTR *count, ULONG *granularity )
{
    NTSTATUS status = STATUS_SUCCESS;
    sigset_t sigset;

    size = ROUND_SIZE( base, size );
    base = ROUND_ADDR( base, page_mask );

    if (!count || !granularity) return STATUS_ACCESS_VIOLATION;
    if (!*count || !size) return STATUS_INVALID_PARAMETER;
    if (flags & ~WRITE_WATCH_FLAG_RESET) return STATUS_INVALID_PARAMETER;

    if (!addresses) return STATUS_ACCESS_VIOLATION;

    TRACE( "%p %x %p-%p %p %lu\n", process, flags, base, (char *)base + size,
           addresses, *count );

    server_enter_uninterrupted_section( &csVirtual, &sigset );

    if (is_write_watch_range( base, size ))
    {
        ULONG_PTR pos = 0;
        char *addr = base;
        char *end = addr + size;

        /* a cleared VPROT_WRITEWATCH bit means the page has been written to
         * since the last reset */
        while (pos < *count && addr < end)
        {
            if (!(get_page_vprot( addr ) & VPROT_WRITEWATCH)) addresses[pos++] = addr;
            addr += page_size;
        }
        /* reset only the portion actually scanned, so unreported pages keep
         * their dirty state for the next call */
        if (flags & WRITE_WATCH_FLAG_RESET) reset_write_watches( base, addr - (char *)base );
        *count = pos;
        *granularity = page_size;
    }
    else status = STATUS_INVALID_PARAMETER;

    server_leave_uninterrupted_section( &csVirtual, &sigset );
    return status;
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* NtResetWriteWatch (NTDLL.@)
|
|
|
|
* ZwResetWriteWatch (NTDLL.@)
|
|
|
|
*/
|
|
|
|
NTSTATUS WINAPI NtResetWriteWatch( HANDLE process, PVOID base, SIZE_T size )
|
|
|
|
{
|
2008-11-25 12:07:35 +01:00
|
|
|
NTSTATUS status = STATUS_SUCCESS;
|
|
|
|
sigset_t sigset;
|
|
|
|
|
|
|
|
size = ROUND_SIZE( base, size );
|
|
|
|
base = ROUND_ADDR( base, page_mask );
|
|
|
|
|
|
|
|
TRACE( "%p %p-%p\n", process, base, (char *)base + size );
|
|
|
|
|
|
|
|
if (!size) return STATUS_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
|
2017-09-22 12:15:27 +02:00
|
|
|
if (is_write_watch_range( base, size ))
|
2017-09-22 09:52:29 +02:00
|
|
|
reset_write_watches( base, size );
|
2008-11-25 12:07:35 +01:00
|
|
|
else
|
|
|
|
status = STATUS_INVALID_PARAMETER;
|
|
|
|
|
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
return status;
|
2008-11-14 10:49:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2003-07-08 23:18:45 +02:00
|
|
|
/***********************************************************************
|
|
|
|
* NtReadVirtualMemory (NTDLL.@)
|
|
|
|
* ZwReadVirtualMemory (NTDLL.@)
|
|
|
|
*/
|
|
|
|
NTSTATUS WINAPI NtReadVirtualMemory( HANDLE process, const void *addr, void *buffer,
|
|
|
|
SIZE_T size, SIZE_T *bytes_read )
|
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
|
2009-01-14 20:36:27 +01:00
|
|
|
if (virtual_check_buffer_for_write( buffer, size ))
|
2003-07-08 23:18:45 +02:00
|
|
|
{
|
2009-01-14 20:36:27 +01:00
|
|
|
SERVER_START_REQ( read_process_memory )
|
|
|
|
{
|
|
|
|
req->handle = wine_server_obj_handle( process );
|
|
|
|
req->addr = wine_server_client_ptr( addr );
|
|
|
|
wine_server_set_reply( req, buffer, size );
|
|
|
|
if ((status = wine_server_call( req ))) size = 0;
|
|
|
|
}
|
|
|
|
SERVER_END_REQ;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
status = STATUS_ACCESS_VIOLATION;
|
|
|
|
size = 0;
|
2003-07-08 23:18:45 +02:00
|
|
|
}
|
|
|
|
if (bytes_read) *bytes_read = size;
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* NtWriteVirtualMemory (NTDLL.@)
|
|
|
|
* ZwWriteVirtualMemory (NTDLL.@)
|
|
|
|
*/
|
|
|
|
NTSTATUS WINAPI NtWriteVirtualMemory( HANDLE process, void *addr, const void *buffer,
|
|
|
|
SIZE_T size, SIZE_T *bytes_written )
|
|
|
|
{
|
|
|
|
NTSTATUS status;
|
|
|
|
|
2009-01-14 20:36:27 +01:00
|
|
|
if (virtual_check_buffer_for_read( buffer, size ))
|
2003-07-08 23:18:45 +02:00
|
|
|
{
|
2009-01-14 20:36:27 +01:00
|
|
|
SERVER_START_REQ( write_process_memory )
|
|
|
|
{
|
|
|
|
req->handle = wine_server_obj_handle( process );
|
|
|
|
req->addr = wine_server_client_ptr( addr );
|
|
|
|
wine_server_add_data( req, buffer, size );
|
|
|
|
if ((status = wine_server_call( req ))) size = 0;
|
|
|
|
}
|
|
|
|
SERVER_END_REQ;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
status = STATUS_PARTIAL_COPY;
|
|
|
|
size = 0;
|
2003-07-08 23:18:45 +02:00
|
|
|
}
|
|
|
|
if (bytes_written) *bytes_written = size;
|
|
|
|
return status;
|
|
|
|
}
|
2007-09-26 20:50:28 +02:00
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* NtAreMappedFilesTheSame (NTDLL.@)
|
|
|
|
* ZwAreMappedFilesTheSame (NTDLL.@)
|
|
|
|
*/
|
|
|
|
NTSTATUS WINAPI NtAreMappedFilesTheSame(PVOID addr1, PVOID addr2)
|
|
|
|
{
|
2010-10-19 16:14:40 +02:00
|
|
|
struct file_view *view1, *view2;
|
|
|
|
NTSTATUS status;
|
|
|
|
sigset_t sigset;
|
|
|
|
|
2007-09-26 20:50:28 +02:00
|
|
|
TRACE("%p %p\n", addr1, addr2);
|
|
|
|
|
2010-10-19 16:14:40 +02:00
|
|
|
server_enter_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
|
|
|
|
view1 = VIRTUAL_FindView( addr1, 0 );
|
|
|
|
view2 = VIRTUAL_FindView( addr2, 0 );
|
|
|
|
|
|
|
|
if (!view1 || !view2)
|
|
|
|
status = STATUS_INVALID_ADDRESS;
|
2017-09-12 12:59:28 +02:00
|
|
|
else if (is_view_valloc( view1 ) || is_view_valloc( view2 ))
|
2010-10-19 16:14:40 +02:00
|
|
|
status = STATUS_CONFLICTING_ADDRESSES;
|
|
|
|
else if (view1 == view2)
|
|
|
|
status = STATUS_SUCCESS;
|
2017-09-26 14:55:13 +02:00
|
|
|
else if ((view1->protect & VPROT_SYSTEM) || (view2->protect & VPROT_SYSTEM))
|
2013-11-12 21:39:06 +01:00
|
|
|
status = STATUS_NOT_SAME_DEVICE;
|
2010-10-19 16:14:40 +02:00
|
|
|
else
|
2017-09-26 14:55:13 +02:00
|
|
|
{
|
|
|
|
SERVER_START_REQ( is_same_mapping )
|
|
|
|
{
|
|
|
|
req->base1 = wine_server_client_ptr( view1->base );
|
|
|
|
req->base2 = wine_server_client_ptr( view2->base );
|
|
|
|
status = wine_server_call( req );
|
|
|
|
}
|
|
|
|
SERVER_END_REQ;
|
|
|
|
}
|
2010-10-19 16:14:40 +02:00
|
|
|
|
|
|
|
server_leave_uninterrupted_section( &csVirtual, &sigset );
|
|
|
|
return status;
|
2007-09-26 20:50:28 +02:00
|
|
|
}
|