/*
|
|
* x86-64 signal handling routines
|
|
*
|
|
* Copyright 1999, 2005 Alexandre Julliard
|
|
*
|
|
* This library is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
* License as published by the Free Software Foundation; either
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
*
|
|
* This library is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* Lesser General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
* License along with this library; if not, write to the Free Software
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
|
|
*/
|
|
|
|
#if 0
|
|
#pragma makedep unix
|
|
#endif
|
|
|
|
#ifdef __x86_64__
|
|
|
|
#include "config.h"
|
|
#include "wine/port.h"
|
|
|
|
#include <assert.h>
|
|
#include <pthread.h>
|
|
#include <signal.h>
|
|
#include <stdlib.h>
|
|
#include <stdarg.h>
|
|
#include <stdio.h>
|
|
#include <sys/types.h>
|
|
#ifdef HAVE_UNISTD_H
|
|
# include <unistd.h>
|
|
#endif
|
|
#ifdef HAVE_MACHINE_SYSARCH_H
|
|
# include <machine/sysarch.h>
|
|
#endif
|
|
#ifdef HAVE_SYS_PARAM_H
|
|
# include <sys/param.h>
|
|
#endif
|
|
#ifdef HAVE_SYSCALL_H
|
|
# include <syscall.h>
|
|
#else
|
|
# ifdef HAVE_SYS_SYSCALL_H
|
|
# include <sys/syscall.h>
|
|
# endif
|
|
#endif
|
|
#ifdef HAVE_SYS_SIGNAL_H
|
|
# include <sys/signal.h>
|
|
#endif
|
|
#ifdef HAVE_SYS_UCONTEXT_H
|
|
# include <sys/ucontext.h>
|
|
#endif
|
|
#ifdef HAVE_LIBUNWIND
|
|
# define UNW_LOCAL_ONLY
|
|
# include <libunwind.h>
|
|
#endif
|
|
#ifdef __APPLE__
|
|
# include <mach/mach.h>
|
|
#endif
|
|
|
|
#define NONAMELESSUNION
|
|
#define NONAMELESSSTRUCT
|
|
#include "ntstatus.h"
|
|
#define WIN32_NO_STATUS
|
|
#include "windef.h"
|
|
#include "winternl.h"
|
|
#include "wine/exception.h"
|
|
#include "wine/list.h"
|
|
#include "wine/asm.h"
|
|
#include "unix_private.h"
|
|
#include "wine/debug.h"
|
|
|
|
WINE_DEFAULT_DEBUG_CHANNEL(seh);
|
|
|
|
/***********************************************************************
|
|
* signal context platform-specific definitions
|
|
*/
|
|
#ifdef linux
|
|
|
|
#include <asm/prctl.h>
|
|
/* Issue the arch_prctl system call directly; used below to set the %gs base. */
static inline int arch_prctl( int func, void *ptr ) { return syscall( __NR_arch_prctl, func, ptr ); }
|
|
|
|
#endif
|
|
|
|
/* x86 hardware trap/fault codes as delivered in the signal context */
enum i386_trap_code
{
    TRAP_x86_UNKNOWN    = -1,  /* Unknown fault (TRAP_sig not defined) */
    TRAP_x86_DIVIDE     = 0,   /* Division by zero exception */
    TRAP_x86_TRCTRAP    = 1,   /* Single-step exception */
    TRAP_x86_NMI        = 2,   /* NMI interrupt */
    TRAP_x86_BPTFLT     = 3,   /* Breakpoint exception */
    TRAP_x86_OFLOW      = 4,   /* Overflow exception */
    TRAP_x86_BOUND      = 5,   /* Bound range exception */
    TRAP_x86_PRIVINFLT  = 6,   /* Invalid opcode exception */
    TRAP_x86_DNA        = 7,   /* Device not available exception */
    TRAP_x86_DOUBLEFLT  = 8,   /* Double fault exception */
    TRAP_x86_FPOPFLT    = 9,   /* Coprocessor segment overrun */
    TRAP_x86_TSSFLT     = 10,  /* Invalid TSS exception */
    TRAP_x86_SEGNPFLT   = 11,  /* Segment not present exception */
    TRAP_x86_STKFLT     = 12,  /* Stack fault */
    TRAP_x86_PROTFLT    = 13,  /* General protection fault */
    TRAP_x86_PAGEFLT    = 14,  /* Page fault */
    TRAP_x86_ARITHTRAP  = 16,  /* Floating point exception */
    TRAP_x86_ALIGNFLT   = 17,  /* Alignment check exception */
    TRAP_x86_MCHK       = 18,  /* Machine check exception */
    TRAP_x86_CACHEFLT   = 19   /* Cache flush exception */
};
|
|
|
|
/* the per-thread signal stack is placed right after the TEB (see signal_init_thread) */
static const size_t teb_size = 0x2000;  /* we reserve two pages for the TEB */
|
|
|
|
/* per-thread CPU-specific data, stored in the TEB SystemReserved2 area */
struct amd64_thread_data
{
    DWORD_PTR dr0;           /* debug registers (cached so NtSet/GetContextThread can
                              * avoid a server round-trip when values are unchanged) */
    DWORD_PTR dr1;
    DWORD_PTR dr2;
    DWORD_PTR dr3;
    DWORD_PTR dr6;
    DWORD_PTR dr7;
    void     *exit_frame;    /* exit frame pointer */
};

C_ASSERT( sizeof(struct amd64_thread_data) <= sizeof(((TEB *)0)->SystemReserved2) );
/* the 0x330 offset of exit_frame is hard-coded in the assembly of
 * signal_start_thread and signal_exit_thread below; keep them in sync */
C_ASSERT( offsetof( TEB, SystemReserved2 ) + offsetof( struct amd64_thread_data, exit_frame ) == 0x330 );

/* return the current thread's CPU-specific data area inside the TEB */
static inline struct amd64_thread_data *amd64_thread_data(void)
{
    return (struct amd64_thread_data *)NtCurrentTeb()->SystemReserved2;
}
|
|
|
|
|
|
/***********************************************************************
|
|
* set_full_cpu_context
|
|
*
|
|
* Set the new CPU context.
|
|
*/
|
|
/* Load the full register state from *context (passed in %rdi per the SysV ABI)
 * and jump to context->Rip.  An iretq frame is built in the 40 bytes allocated
 * on the stack: [0]=RIP [8]=CS [16]=EFLAGS [24]=RSP [32]=SS, so the final iretq
 * atomically restores rip/cs/eflags/rsp/ss.  The aligned SSE loads (movdqa)
 * require the CONTEXT to be 16-byte aligned.
 * NOTE(review): .seh_stackalloc says 0x40 (64) but only 40 bytes are allocated;
 * harmless on ELF builds where __ASM_SEH expands to nothing — confirm for PE. */
extern void set_full_cpu_context( const CONTEXT *context );
__ASM_GLOBAL_FUNC( set_full_cpu_context,
                   "subq $40,%rsp\n\t"
                   __ASM_SEH(".seh_stackalloc 0x40\n\t")
                   __ASM_SEH(".seh_endprologue\n\t")
                   __ASM_CFI(".cfi_adjust_cfa_offset 40\n\t")
                   "ldmxcsr 0x34(%rdi)\n\t"         /* context->MxCsr */
                   "movw 0x38(%rdi),%ax\n\t"        /* context->SegCs */
                   "movq %rax,8(%rsp)\n\t"
                   "movw 0x42(%rdi),%ax\n\t"        /* context->SegSs */
                   "movq %rax,32(%rsp)\n\t"
                   "movq 0x44(%rdi),%rax\n\t"       /* context->EFlags */
                   "movq %rax,16(%rsp)\n\t"
                   "movq 0x80(%rdi),%rcx\n\t"       /* context->Rcx */
                   "movq 0x88(%rdi),%rdx\n\t"       /* context->Rdx */
                   "movq 0x90(%rdi),%rbx\n\t"       /* context->Rbx */
                   "movq 0x98(%rdi),%rax\n\t"       /* context->Rsp */
                   "movq %rax,24(%rsp)\n\t"
                   "movq 0xa0(%rdi),%rbp\n\t"       /* context->Rbp */
                   "movq 0xa8(%rdi),%rsi\n\t"       /* context->Rsi */
                   "movq 0xb8(%rdi),%r8\n\t"        /* context->R8 */
                   "movq 0xc0(%rdi),%r9\n\t"        /* context->R9 */
                   "movq 0xc8(%rdi),%r10\n\t"       /* context->R10 */
                   "movq 0xd0(%rdi),%r11\n\t"       /* context->R11 */
                   "movq 0xd8(%rdi),%r12\n\t"       /* context->R12 */
                   "movq 0xe0(%rdi),%r13\n\t"       /* context->R13 */
                   "movq 0xe8(%rdi),%r14\n\t"       /* context->R14 */
                   "movq 0xf0(%rdi),%r15\n\t"       /* context->R15 */
                   "movq 0xf8(%rdi),%rax\n\t"       /* context->Rip */
                   "movq %rax,(%rsp)\n\t"
                   "fxrstor 0x100(%rdi)\n\t"        /* context->FltSave */
                   "movdqa 0x1a0(%rdi),%xmm0\n\t"   /* context->Xmm0 */
                   "movdqa 0x1b0(%rdi),%xmm1\n\t"   /* context->Xmm1 */
                   "movdqa 0x1c0(%rdi),%xmm2\n\t"   /* context->Xmm2 */
                   "movdqa 0x1d0(%rdi),%xmm3\n\t"   /* context->Xmm3 */
                   "movdqa 0x1e0(%rdi),%xmm4\n\t"   /* context->Xmm4 */
                   "movdqa 0x1f0(%rdi),%xmm5\n\t"   /* context->Xmm5 */
                   "movdqa 0x200(%rdi),%xmm6\n\t"   /* context->Xmm6 */
                   "movdqa 0x210(%rdi),%xmm7\n\t"   /* context->Xmm7 */
                   "movdqa 0x220(%rdi),%xmm8\n\t"   /* context->Xmm8 */
                   "movdqa 0x230(%rdi),%xmm9\n\t"   /* context->Xmm9 */
                   "movdqa 0x240(%rdi),%xmm10\n\t"  /* context->Xmm10 */
                   "movdqa 0x250(%rdi),%xmm11\n\t"  /* context->Xmm11 */
                   "movdqa 0x260(%rdi),%xmm12\n\t"  /* context->Xmm12 */
                   "movdqa 0x270(%rdi),%xmm13\n\t"  /* context->Xmm13 */
                   "movdqa 0x280(%rdi),%xmm14\n\t"  /* context->Xmm14 */
                   "movdqa 0x290(%rdi),%xmm15\n\t"  /* context->Xmm15 */
                   /* rax and rdi are loaded last since they are still in use above */
                   "movq 0x78(%rdi),%rax\n\t"       /* context->Rax */
                   "movq 0xb0(%rdi),%rdi\n\t"       /* context->Rdi */
                   "iretq" );
|
|
|
|
|
|
/***********************************************************************
|
|
* set_cpu_context
|
|
*
|
|
* Set the new CPU context. Used by NtSetContextThread.
|
|
*/
|
|
/* Install the register state from *context on the current thread.
 * Debug registers are only cached in the per-thread data (the server owns
 * the real ones); a full context switch requires CONTEXT_CONTROL. */
void DECLSPEC_HIDDEN set_cpu_context( const CONTEXT *context )
{
    DWORD flags = context->ContextFlags & ~CONTEXT_AMD64;  /* strip CPU id */

    if (flags & CONTEXT_DEBUG_REGISTERS)
    {
        struct amd64_thread_data *thread_data = amd64_thread_data();

        thread_data->dr0 = context->Dr0;
        thread_data->dr1 = context->Dr1;
        thread_data->dr2 = context->Dr2;
        thread_data->dr3 = context->Dr3;
        thread_data->dr6 = context->Dr6;
        thread_data->dr7 = context->Dr7;
    }

    if (!(flags & CONTEXT_FULL)) return;

    if (flags & CONTEXT_CONTROL)
        set_full_cpu_context( context );
    else
        FIXME( "setting partial context (%x) not supported\n", flags );
}
|
|
|
|
|
|
/***********************************************************************
|
|
* get_server_context_flags
|
|
*
|
|
* Convert CPU-specific flags to generic server flags
|
|
*/
|
|
/* Convert CPU-specific CONTEXT flags to the generic wineserver flag set. */
static unsigned int get_server_context_flags( DWORD flags )
{
    /* mapping from CPU-specific context flags to server context flags */
    static const struct
    {
        DWORD        cpu_flag;
        unsigned int server_flag;
    }
    flag_map[] =
    {
        { CONTEXT_CONTROL,         SERVER_CTX_CONTROL },
        { CONTEXT_INTEGER,         SERVER_CTX_INTEGER },
        { CONTEXT_SEGMENTS,        SERVER_CTX_SEGMENTS },
        { CONTEXT_FLOATING_POINT,  SERVER_CTX_FLOATING_POINT },
        { CONTEXT_DEBUG_REGISTERS, SERVER_CTX_DEBUG_REGISTERS },
    };
    unsigned int i, ret = 0;

    flags &= ~CONTEXT_AMD64;  /* get rid of CPU id */
    for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++)
        if (flags & flag_map[i].cpu_flag) ret |= flag_map[i].server_flag;
    return ret;
}
|
|
|
|
|
|
/***********************************************************************
|
|
* copy_context
|
|
*
|
|
* Copy a register context according to the flags.
|
|
*/
|
|
/* Copy the register groups selected by 'flags' from one CONTEXT to another.
 * Fields outside the selected groups are left untouched in 'to'. */
static void copy_context( CONTEXT *to, const CONTEXT *from, DWORD flags )
{
    flags &= ~CONTEXT_AMD64;  /* get rid of CPU id */
    if (flags & CONTEXT_CONTROL)
    {
        to->Rbp    = from->Rbp;
        to->Rip    = from->Rip;
        to->Rsp    = from->Rsp;
        to->SegCs  = from->SegCs;
        to->SegSs  = from->SegSs;
        to->EFlags = from->EFlags;
    }
    if (flags & CONTEXT_INTEGER)
    {
        to->Rax = from->Rax;
        to->Rcx = from->Rcx;
        to->Rdx = from->Rdx;
        to->Rbx = from->Rbx;
        to->Rsi = from->Rsi;
        to->Rdi = from->Rdi;
        to->R8  = from->R8;
        to->R9  = from->R9;
        to->R10 = from->R10;
        to->R11 = from->R11;
        to->R12 = from->R12;
        to->R13 = from->R13;
        to->R14 = from->R14;
        to->R15 = from->R15;
    }
    if (flags & CONTEXT_SEGMENTS)
    {
        to->SegDs = from->SegDs;
        to->SegEs = from->SegEs;
        to->SegFs = from->SegFs;
        to->SegGs = from->SegGs;
    }
    if (flags & CONTEXT_FLOATING_POINT)
    {
        to->MxCsr = from->MxCsr;
        to->u.FltSave = from->u.FltSave;  /* struct copy of the whole FXSAVE area */
    }
    if (flags & CONTEXT_DEBUG_REGISTERS)
    {
        to->Dr0 = from->Dr0;
        to->Dr1 = from->Dr1;
        to->Dr2 = from->Dr2;
        to->Dr3 = from->Dr3;
        to->Dr6 = from->Dr6;
        to->Dr7 = from->Dr7;
    }
}
|
|
|
|
|
|
/***********************************************************************
|
|
* context_to_server
|
|
*
|
|
* Convert a register context to the server format.
|
|
*/
|
|
/* Translate a CPU-specific CONTEXT into the generic wireserver context_t.
 * Only the register groups selected in from->ContextFlags are filled in;
 * the rest of *to is zeroed.  Always succeeds. */
NTSTATUS context_to_server( context_t *to, const CONTEXT *from )
{
    DWORD flags = from->ContextFlags & ~CONTEXT_AMD64;  /* get rid of CPU id */

    memset( to, 0, sizeof(*to) );
    to->cpu = CPU_x86_64;

    if (flags & CONTEXT_CONTROL)
    {
        to->flags |= SERVER_CTX_CONTROL;
        to->ctl.x86_64_regs.rbp   = from->Rbp;
        to->ctl.x86_64_regs.rip   = from->Rip;
        to->ctl.x86_64_regs.rsp   = from->Rsp;
        to->ctl.x86_64_regs.cs    = from->SegCs;
        to->ctl.x86_64_regs.ss    = from->SegSs;
        to->ctl.x86_64_regs.flags = from->EFlags;
    }
    if (flags & CONTEXT_INTEGER)
    {
        to->flags |= SERVER_CTX_INTEGER;
        to->integer.x86_64_regs.rax = from->Rax;
        to->integer.x86_64_regs.rcx = from->Rcx;
        to->integer.x86_64_regs.rdx = from->Rdx;
        to->integer.x86_64_regs.rbx = from->Rbx;
        to->integer.x86_64_regs.rsi = from->Rsi;
        to->integer.x86_64_regs.rdi = from->Rdi;
        to->integer.x86_64_regs.r8  = from->R8;
        to->integer.x86_64_regs.r9  = from->R9;
        to->integer.x86_64_regs.r10 = from->R10;
        to->integer.x86_64_regs.r11 = from->R11;
        to->integer.x86_64_regs.r12 = from->R12;
        to->integer.x86_64_regs.r13 = from->R13;
        to->integer.x86_64_regs.r14 = from->R14;
        to->integer.x86_64_regs.r15 = from->R15;
    }
    if (flags & CONTEXT_SEGMENTS)
    {
        to->flags |= SERVER_CTX_SEGMENTS;
        to->seg.x86_64_regs.ds = from->SegDs;
        to->seg.x86_64_regs.es = from->SegEs;
        to->seg.x86_64_regs.fs = from->SegFs;
        to->seg.x86_64_regs.gs = from->SegGs;
    }
    if (flags & CONTEXT_FLOATING_POINT)
    {
        to->flags |= SERVER_CTX_FLOATING_POINT;
        /* raw copy of the FXSAVE area; bounded by the server-side buffer size */
        memcpy( to->fp.x86_64_regs.fpregs, &from->u.FltSave, sizeof(to->fp.x86_64_regs.fpregs) );
    }
    if (flags & CONTEXT_DEBUG_REGISTERS)
    {
        to->flags |= SERVER_CTX_DEBUG_REGISTERS;
        to->debug.x86_64_regs.dr0 = from->Dr0;
        to->debug.x86_64_regs.dr1 = from->Dr1;
        to->debug.x86_64_regs.dr2 = from->Dr2;
        to->debug.x86_64_regs.dr3 = from->Dr3;
        to->debug.x86_64_regs.dr6 = from->Dr6;
        to->debug.x86_64_regs.dr7 = from->Dr7;
    }
    return STATUS_SUCCESS;
}
|
|
|
|
|
|
/***********************************************************************
|
|
* context_from_server
|
|
*
|
|
* Convert a register context from the server format.
|
|
*/
|
|
/* Translate a generic wineserver context_t back into a CPU-specific CONTEXT.
 * to->ContextFlags is rebuilt from the server flags actually present.
 * Fails with STATUS_INVALID_PARAMETER if the server context is for another CPU. */
NTSTATUS context_from_server( CONTEXT *to, const context_t *from )
{
    if (from->cpu != CPU_x86_64) return STATUS_INVALID_PARAMETER;

    to->ContextFlags = CONTEXT_AMD64;
    if (from->flags & SERVER_CTX_CONTROL)
    {
        to->ContextFlags |= CONTEXT_CONTROL;
        to->Rbp    = from->ctl.x86_64_regs.rbp;
        to->Rip    = from->ctl.x86_64_regs.rip;
        to->Rsp    = from->ctl.x86_64_regs.rsp;
        to->SegCs  = from->ctl.x86_64_regs.cs;
        to->SegSs  = from->ctl.x86_64_regs.ss;
        to->EFlags = from->ctl.x86_64_regs.flags;
    }

    if (from->flags & SERVER_CTX_INTEGER)
    {
        to->ContextFlags |= CONTEXT_INTEGER;
        to->Rax = from->integer.x86_64_regs.rax;
        to->Rcx = from->integer.x86_64_regs.rcx;
        to->Rdx = from->integer.x86_64_regs.rdx;
        to->Rbx = from->integer.x86_64_regs.rbx;
        to->Rsi = from->integer.x86_64_regs.rsi;
        to->Rdi = from->integer.x86_64_regs.rdi;
        to->R8  = from->integer.x86_64_regs.r8;
        to->R9  = from->integer.x86_64_regs.r9;
        to->R10 = from->integer.x86_64_regs.r10;
        to->R11 = from->integer.x86_64_regs.r11;
        to->R12 = from->integer.x86_64_regs.r12;
        to->R13 = from->integer.x86_64_regs.r13;
        to->R14 = from->integer.x86_64_regs.r14;
        to->R15 = from->integer.x86_64_regs.r15;
    }
    if (from->flags & SERVER_CTX_SEGMENTS)
    {
        to->ContextFlags |= CONTEXT_SEGMENTS;
        to->SegDs = from->seg.x86_64_regs.ds;
        to->SegEs = from->seg.x86_64_regs.es;
        to->SegFs = from->seg.x86_64_regs.fs;
        to->SegGs = from->seg.x86_64_regs.gs;
    }
    if (from->flags & SERVER_CTX_FLOATING_POINT)
    {
        to->ContextFlags |= CONTEXT_FLOATING_POINT;
        memcpy( &to->u.FltSave, from->fp.x86_64_regs.fpregs, sizeof(from->fp.x86_64_regs.fpregs) );
        /* the top-level MxCsr field mirrors the one inside the FXSAVE area */
        to->MxCsr = to->u.FltSave.MxCsr;
    }
    if (from->flags & SERVER_CTX_DEBUG_REGISTERS)
    {
        to->ContextFlags |= CONTEXT_DEBUG_REGISTERS;
        to->Dr0 = from->debug.x86_64_regs.dr0;
        to->Dr1 = from->debug.x86_64_regs.dr1;
        to->Dr2 = from->debug.x86_64_regs.dr2;
        to->Dr3 = from->debug.x86_64_regs.dr3;
        to->Dr6 = from->debug.x86_64_regs.dr6;
        to->Dr7 = from->debug.x86_64_regs.dr7;
    }
    return STATUS_SUCCESS;
}
|
|
|
|
|
|
/***********************************************************************
|
|
* NtSetContextThread (NTDLL.@)
|
|
* ZwSetContextThread (NTDLL.@)
|
|
*/
|
|
/* Set the register context of a thread.  Setting debug registers requires a
 * server call, except when the requested values already match the cached ones. */
NTSTATUS WINAPI NtSetContextThread( HANDLE handle, const CONTEXT *context )
{
    NTSTATUS status = STATUS_SUCCESS;
    BOOL self = (handle == GetCurrentThread());

    /* debug registers require a server call */
    if (self && (context->ContextFlags & (CONTEXT_DEBUG_REGISTERS & ~CONTEXT_AMD64)))
    {
        const struct amd64_thread_data *thread_data = amd64_thread_data();

        /* skip the server call only when nothing would actually change */
        self = (thread_data->dr0 == context->Dr0 &&
                thread_data->dr1 == context->Dr1 &&
                thread_data->dr2 == context->Dr2 &&
                thread_data->dr3 == context->Dr3 &&
                thread_data->dr6 == context->Dr6 &&
                thread_data->dr7 == context->Dr7);
    }

    if (!self)
    {
        context_t server_context;

        context_to_server( &server_context, context );
        status = set_thread_context( handle, &server_context, &self );
    }
    if (self && status == STATUS_SUCCESS) set_cpu_context( context );
    return status;
}
|
|
|
|
|
|
/***********************************************************************
|
|
* NtGetContextThread (NTDLL.@)
|
|
* ZwGetContextThread (NTDLL.@)
|
|
*/
|
|
/* Retrieve the register context of a thread.  Registers the server knows
 * about (or debug registers, always) are fetched through a server call;
 * whatever is still missing for the current thread is captured locally. */
NTSTATUS WINAPI NtGetContextThread( HANDLE handle, CONTEXT *context )
{
    NTSTATUS ret;
    DWORD needed_flags;
    BOOL self = (handle == GetCurrentThread());

    if (!context) return STATUS_INVALID_PARAMETER;

    needed_flags = context->ContextFlags;

    /* debug registers require a server call */
    if (context->ContextFlags & (CONTEXT_DEBUG_REGISTERS & ~CONTEXT_AMD64)) self = FALSE;

    if (!self)
    {
        context_t server_context;
        unsigned int server_flags = get_server_context_flags( context->ContextFlags );

        /* get_thread_context may set 'self' if the handle turns out to
         * refer to the current thread */
        if ((ret = get_thread_context( handle, &server_context, server_flags, &self ))) return ret;
        if ((ret = context_from_server( context, &server_context ))) return ret;
        /* only the flags the server did not satisfy still need capturing */
        needed_flags &= ~context->ContextFlags;
    }

    if (self)
    {
        if (needed_flags)
        {
            CONTEXT ctx;
            RtlCaptureContext( &ctx );
            copy_context( context, &ctx, ctx.ContextFlags & needed_flags );
            context->ContextFlags |= ctx.ContextFlags & needed_flags;
        }
        /* update the cached version of the debug registers */
        if (context->ContextFlags & (CONTEXT_DEBUG_REGISTERS & ~CONTEXT_AMD64))
        {
            amd64_thread_data()->dr0 = context->Dr0;
            amd64_thread_data()->dr1 = context->Dr1;
            amd64_thread_data()->dr2 = context->Dr2;
            amd64_thread_data()->dr3 = context->Dr3;
            amd64_thread_data()->dr6 = context->Dr6;
            amd64_thread_data()->dr7 = context->Dr7;
        }
    }
    return STATUS_SUCCESS;
}
|
|
|
|
|
|
/**********************************************************************
|
|
* get_thread_ldt_entry
|
|
*/
|
|
/* LDT entries are not supported on x86-64; always fails. */
NTSTATUS CDECL get_thread_ldt_entry( HANDLE handle, void *data, ULONG len, ULONG *ret_len )
{
    return STATUS_NOT_IMPLEMENTED;
}
|
|
|
|
|
|
/******************************************************************************
|
|
* NtSetLdtEntries (NTDLL.@)
|
|
* ZwSetLdtEntries (NTDLL.@)
|
|
*/
|
|
/* LDT entries are not supported on x86-64; always fails. */
NTSTATUS WINAPI NtSetLdtEntries( ULONG sel1, LDT_ENTRY entry1, ULONG sel2, LDT_ENTRY entry2 )
{
    return STATUS_NOT_IMPLEMENTED;
}
|
|
|
|
|
|
/**********************************************************************
|
|
* signal_init_threading
|
|
*/
|
|
void signal_init_threading(void)
{
    /* nothing to do on this platform */
}
|
|
|
|
|
|
/**********************************************************************
|
|
* signal_alloc_thread
|
|
*/
|
|
NTSTATUS signal_alloc_thread( TEB *teb )
{
    /* no per-thread allocation needed on this platform */
    return STATUS_SUCCESS;
}
|
|
|
|
|
|
/**********************************************************************
|
|
* signal_free_thread
|
|
*/
|
|
void signal_free_thread( TEB *teb )
{
    /* nothing to free: signal_alloc_thread allocates nothing */
}
|
|
|
|
#ifdef __APPLE__
|
|
/**********************************************************************
|
|
* mac_thread_gsbase
|
|
*/
|
|
static void *mac_thread_gsbase(void)
|
|
{
|
|
struct thread_identifier_info tiinfo;
|
|
unsigned int info_count = THREAD_IDENTIFIER_INFO_COUNT;
|
|
static int gsbase_offset = -1;
|
|
|
|
kern_return_t kr = thread_info(mach_thread_self(), THREAD_IDENTIFIER_INFO, (thread_info_t) &tiinfo, &info_count);
|
|
if (kr == KERN_SUCCESS) return (void*)tiinfo.thread_handle;
|
|
|
|
if (gsbase_offset < 0)
|
|
{
|
|
/* Search for the array of TLS slots within the pthread data structure.
|
|
That's what the macOS pthread implementation uses for gsbase. */
|
|
const void* const sentinel1 = (const void*)0x2bffb6b4f11228ae;
|
|
const void* const sentinel2 = (const void*)0x0845a7ff6ab76707;
|
|
int rc;
|
|
pthread_key_t key;
|
|
const void** p = (const void**)pthread_self();
|
|
int i;
|
|
|
|
gsbase_offset = 0;
|
|
if ((rc = pthread_key_create(&key, NULL))) return NULL;
|
|
|
|
pthread_setspecific(key, sentinel1);
|
|
|
|
for (i = key + 1; i < 2000; i++) /* arbitrary limit */
|
|
{
|
|
if (p[i] == sentinel1)
|
|
{
|
|
pthread_setspecific(key, sentinel2);
|
|
|
|
if (p[i] == sentinel2)
|
|
{
|
|
gsbase_offset = (i - key) * sizeof(*p);
|
|
break;
|
|
}
|
|
|
|
pthread_setspecific(key, sentinel1);
|
|
}
|
|
}
|
|
|
|
pthread_key_delete(key);
|
|
}
|
|
|
|
if (gsbase_offset) return (char*)pthread_self() + gsbase_offset;
|
|
return NULL;
|
|
}
|
|
#endif
|
|
|
|
|
|
/**********************************************************************
|
|
* signal_init_thread
|
|
*/
|
|
/* Per-thread signal setup: point %gs at the TEB, install the alternate
 * signal stack right after the TEB, and initialize the FPU control word. */
void signal_init_thread( TEB *teb )
{
    const WORD fpu_cw = 0x27f;  /* x87 default: all exceptions masked, 53-bit precision */
    stack_t ss;

#if defined __linux__
    /* NOTE(review): arch_prctl above is defined under '#ifdef linux' while this
     * uses '__linux__' — equivalent with GNU extensions enabled, but confirm */
    arch_prctl( ARCH_SET_GS, teb );
#elif defined (__FreeBSD__) || defined (__FreeBSD_kernel__)
    amd64_set_gsbase( teb );
#elif defined(__NetBSD__)
    sysarch( X86_64_SET_GSBASE, &teb );
#elif defined (__APPLE__)
    /* no user-settable gsbase on macOS: instead poke the TEB fields that are
     * accessed through %gs directly at their %gs-relative offsets (the 0x65
     * byte is the gs segment-override prefix) */
    __asm__ volatile (".byte 0x65\n\tmovq %0,%c1"
                      :
                      : "r" (teb->Tib.Self), "n" (FIELD_OFFSET(TEB, Tib.Self)));
    __asm__ volatile (".byte 0x65\n\tmovq %0,%c1"
                      :
                      : "r" (teb->ThreadLocalStoragePointer), "n" (FIELD_OFFSET(TEB, ThreadLocalStoragePointer)));

    /* alloc_tls_slot() needs to poke a value to an address relative to each
       thread's gsbase. Have each thread record its gsbase pointer into its
       TEB so alloc_tls_slot() can find it. */
    teb->Reserved5[0] = mac_thread_gsbase();
#else
# error Please define setting %gs for your architecture
#endif

    /* alternate signal stack lives in the two pages right after the TEB */
    ss.ss_sp    = (char *)teb + teb_size;
    ss.ss_size  = signal_stack_size;
    ss.ss_flags = 0;
    if (sigaltstack(&ss, NULL) == -1) perror( "sigaltstack" );

#ifdef __GNUC__
    __asm__ volatile ("fninit; fldcw %0" : : "m" (fpu_cw));
#else
    FIXME("FPU setup not implemented for this platform.\n");
#endif
}
|
|
|
|
|
|
/***********************************************************************
|
|
* init_thread_context
|
|
*/
|
|
/* Build the initial CONTEXT for a new thread: entry point and argument go in
 * the first two Win64 argument registers, the stack starts near the top of the
 * thread stack, and the FPU/SSE state gets default control values. */
static void init_thread_context( CONTEXT *context, LPTHREAD_START_ROUTINE entry, void *arg, void *relay )
{
    /* capture the current code/stack segment selectors */
    __asm__( "movw %%cs,%0" : "=m" (context->SegCs) );
    __asm__( "movw %%ss,%0" : "=m" (context->SegSs) );
    context->Rcx = (ULONG_PTR)entry;   /* first Win64 argument: thread entry point */
    context->Rdx = (ULONG_PTR)arg;     /* second Win64 argument: entry argument */
    /* 0x28 bytes below the stack top — presumably shadow space plus return
     * slot for the relay call; TODO confirm against the relay's expectations */
    context->Rsp = (ULONG_PTR)NtCurrentTeb()->Tib.StackBase - 0x28;
    context->Rip = (ULONG_PTR)relay;
    context->EFlags = 0x200;           /* interrupts enabled (IF set) */
    context->u.FltSave.ControlWord = 0x27f;   /* x87 default control word */
    context->u.FltSave.MxCsr = context->MxCsr = 0x1f80;  /* SSE default: all exceptions masked */
}
|
|
|
|
|
|
/***********************************************************************
|
|
* attach_thread
|
|
*/
|
|
/* First step of a new thread: build the initial context (optionally waiting
 * for the debugger while suspended), run LdrInitializeThunk to attach dlls,
 * and return the context that signal_start_thread will switch to. */
PCONTEXT DECLSPEC_HIDDEN attach_thread( LPTHREAD_START_ROUTINE entry, void *arg,
                                        BOOL suspend, void *relay )
{
    CONTEXT *ctx;

    if (suspend)
    {
        CONTEXT context = { 0 };

        context.ContextFlags = CONTEXT_ALL;
        init_thread_context( &context, entry, arg, relay );
        wait_suspend( &context );   /* let the debugger inspect/modify the start context */
        /* place the (possibly modified) context on the thread stack:
         * align Rsp down to 16 bytes, then reserve room for one CONTEXT */
        ctx = (CONTEXT *)((ULONG_PTR)context.Rsp & ~15) - 1;
        *ctx = context;
    }
    else
    {
        /* no suspend: build the context directly in place near the stack top */
        ctx = (CONTEXT *)((char *)NtCurrentTeb()->Tib.StackBase - 0x30) - 1;
        init_thread_context( ctx, entry, arg, relay );
    }
    ctx->ContextFlags = CONTEXT_FULL;
    LdrInitializeThunk( ctx, (void **)&ctx->Rcx, 0, 0 );
    return ctx;
}
|
|
|
|
|
|
/***********************************************************************
|
|
* signal_start_thread
|
|
*/
|
|
/* Thread bootstrap: save callee-saved registers, record the exit frame in the
 * TEB (offset 0x330 = amd64_thread_data()->exit_frame, checked by the C_ASSERT
 * above), switch to the thread stack, attach dlls, then jump into the initial
 * context.  Arguments arrive in rdi/rsi/rdx/rcx and flow into attach_thread. */
__ASM_GLOBAL_FUNC( signal_start_thread,
                   "subq $56,%rsp\n\t"
                   __ASM_SEH(".seh_stackalloc 56\n\t")
                   __ASM_SEH(".seh_endprologue\n\t")
                   __ASM_CFI(".cfi_adjust_cfa_offset 56\n\t")
                   "movq %rbp,48(%rsp)\n\t"
                   __ASM_CFI(".cfi_rel_offset %rbp,48\n\t")
                   "movq %rbx,40(%rsp)\n\t"
                   __ASM_CFI(".cfi_rel_offset %rbx,40\n\t")
                   "movq %r12,32(%rsp)\n\t"
                   __ASM_CFI(".cfi_rel_offset %r12,32\n\t")
                   "movq %r13,24(%rsp)\n\t"
                   __ASM_CFI(".cfi_rel_offset %r13,24\n\t")
                   "movq %r14,16(%rsp)\n\t"
                   __ASM_CFI(".cfi_rel_offset %r14,16\n\t")
                   "movq %r15,8(%rsp)\n\t"
                   __ASM_CFI(".cfi_rel_offset %r15,8\n\t")
                   /* store exit frame */
                   "movq %gs:0x30,%rax\n\t"     /* TEB self pointer */
                   "movq %rsp,0x330(%rax)\n\t"  /* amd64_thread_data()->exit_frame */
                   /* switch to thread stack, leaving a page of headroom below the top */
                   "movq 8(%rax),%rax\n\t"      /* NtCurrentTeb()->Tib.StackBase */
                   "leaq -0x1000(%rax),%rsp\n\t"
                   /* attach dlls; returns the initial CONTEXT* in %rax */
                   "call " __ASM_NAME("attach_thread") "\n\t"
                   "movq %rax,%rsp\n\t"
                   /* clear the stack above the context */
                   "andq $~0xfff,%rax\n\t"      /* round down to page size */
                   "movq %rax,%rdi\n\t"
                   "call " __ASM_NAME("virtual_clear_thread_stack") "\n\t"
                   /* switch to the initial context (does not return) */
                   "movq %rsp,%rdi\n\t"
                   "call " __ASM_NAME("set_cpu_context") )
|
|
|
|
|
|
/***********************************************************************
|
|
* signal_exit_thread
|
|
*/
|
|
/* Thread exit: if an exit frame was recorded by signal_start_thread (TEB
 * offset 0x330), clear it and switch back to that frame's stack before
 * invoking the exit function held in %rsi; %rdi (its argument) is left
 * untouched.  Without an exit frame, jump to the function directly. */
__ASM_GLOBAL_FUNC( signal_exit_thread,
                   /* fetch exit frame */
                   "movq %gs:0x30,%rax\n\t"      /* TEB self pointer */
                   "movq 0x330(%rax),%rdx\n\t"   /* amd64_thread_data()->exit_frame */
                   "testq %rdx,%rdx\n\t"
                   "jnz 1f\n\t"
                   "jmp *%rsi\n"                 /* no exit frame: tail-call the exit function */
                   /* switch to exit frame stack */
                   "1:\tmovq $0,0x330(%rax)\n\t" /* clear the frame so it is used only once */
                   "movq %rdx,%rsp\n\t"
                   /* CFI mirrors the frame layout saved in signal_start_thread */
                   __ASM_CFI(".cfi_adjust_cfa_offset 56\n\t")
                   __ASM_CFI(".cfi_rel_offset %rbp,48\n\t")
                   __ASM_CFI(".cfi_rel_offset %rbx,40\n\t")
                   __ASM_CFI(".cfi_rel_offset %r12,32\n\t")
                   __ASM_CFI(".cfi_rel_offset %r13,24\n\t")
                   __ASM_CFI(".cfi_rel_offset %r14,16\n\t")
                   __ASM_CFI(".cfi_rel_offset %r15,8\n\t")
                   "call *%rsi" )
|
|
|
|
#endif /* __x86_64__ */
|