/*
 * Copyright (c) 2002, TransGaming Technologies Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <stdarg.h>

#include "wine/debug.h"
#include "windef.h"
#include "winbase.h"
#include "winternl.h"
#include "wine/heap.h"
#include "msvcrt.h"
#include "cppexcept.h"
#include "mtdll.h"
#include "cxx.h"

WINE_DEFAULT_DEBUG_CHANNEL(msvcrt);

typedef struct
{
    BOOL bInit;
    CRITICAL_SECTION crit;
} LOCKTABLEENTRY;

static LOCKTABLEENTRY lock_table[ _TOTAL_LOCKS ];

static inline void msvcrt_mlock_set_entry_initialized( int locknum, BOOL initialized )
{
    lock_table[ locknum ].bInit = initialized;
}

static inline void msvcrt_initialize_mlock( int locknum )
{
    InitializeCriticalSection( &(lock_table[ locknum ].crit) );
    lock_table[ locknum ].crit.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": LOCKTABLEENTRY.crit");
    msvcrt_mlock_set_entry_initialized( locknum, TRUE );
}

static inline void msvcrt_uninitialize_mlock( int locknum )
{
    lock_table[ locknum ].crit.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection( &(lock_table[ locknum ].crit) );
    msvcrt_mlock_set_entry_initialized( locknum, FALSE );
}

/**********************************************************************
 *     msvcrt_init_mt_locks (internal)
 *
 * Initialize the table lock. All other locks will be initialized
 * upon first use.
 *
 */
void msvcrt_init_mt_locks(void)
{
    int i;

    TRACE( "initializing mtlocks\n" );

    /* Initialize the table */
    for( i=0; i < _TOTAL_LOCKS; i++ )
    {
        msvcrt_mlock_set_entry_initialized( i, FALSE );
    }

    /* Initialize our lock table lock */
    msvcrt_initialize_mlock( _LOCKTAB_LOCK );
}

/**********************************************************************
 *              _lock (MSVCRT.@)
 */
void CDECL _lock( int locknum )
{
    TRACE( "(%d)\n", locknum );

    /* If the lock doesn't exist yet, create it */
    if( lock_table[ locknum ].bInit == FALSE )
    {
        /* Lock while we're changing the lock table */
        _lock( _LOCKTAB_LOCK );

        /* Check again if we've got a bit of a race on lock creation */
        if( lock_table[ locknum ].bInit == FALSE )
        {
            TRACE( ": creating lock #%d\n", locknum );
            msvcrt_initialize_mlock( locknum );
        }

        /* Unlock ourselves */
        _unlock( _LOCKTAB_LOCK );
    }

    EnterCriticalSection( &(lock_table[ locknum ].crit) );
}

/**********************************************************************
 *              _unlock (MSVCRT.@)
 *
 * NOTE: There is no error detection to make sure the lock exists and is acquired.
 */
void CDECL _unlock( int locknum )
{
    TRACE( "(%d)\n", locknum );

    LeaveCriticalSection( &(lock_table[ locknum ].crit) );
}

#if _MSVCR_VER >= 100
typedef enum
{
    SPINWAIT_INIT,
    SPINWAIT_SPIN,
    SPINWAIT_YIELD,
    SPINWAIT_DONE
} SpinWait_state;

typedef void (__cdecl *yield_func)(void);

typedef struct
{
    ULONG spin;
    ULONG unknown;
    SpinWait_state state;
    yield_func yield_func;
} SpinWait;

/* ?_Value@_SpinCount@details@Concurrency@@SAIXZ */
unsigned int __cdecl SpinCount__Value(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors>1 ? 4000 : 0;
    }

    return val;
}

/* ??0?$_SpinWait@$00@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$00@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor_yield, 8)
SpinWait* __thiscall SpinWait_ctor_yield(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 1;
    this->yield_func = yf;
    return this;
}

/* ??0?$_SpinWait@$0A@@details@Concurrency@@QAE@P6AXXZ@Z */
/* ??0?$_SpinWait@$0A@@details@Concurrency@@QEAA@P6AXXZ@Z */
DEFINE_THISCALL_WRAPPER(SpinWait_ctor, 8)
SpinWait* __thiscall SpinWait_ctor(SpinWait *this, yield_func yf)
{
    TRACE("(%p %p)\n", this, yf);

    this->state = SPINWAIT_INIT;
    this->unknown = 0;
    this->yield_func = yf;
    return this;
}

/* ??_F?$_SpinWait@$00@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$00@details@Concurrency@@QEAAXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QAEXXZ */
/* ??_F?$_SpinWait@$0A@@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait_dtor, 4)
void __thiscall SpinWait_dtor(SpinWait *this)
{
    TRACE("(%p)\n", this);
}

/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_DoYield@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__DoYield, 4)
void __thiscall SpinWait__DoYield(SpinWait *this)
{
    TRACE("(%p)\n", this);

    if(this->unknown)
        this->yield_func();
}

/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$00@details@Concurrency@@IEAAKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IAEKXZ */
/* ?_NumberOfSpins@?$_SpinWait@$0A@@details@Concurrency@@IEAAKXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__NumberOfSpins, 4)
ULONG __thiscall SpinWait__NumberOfSpins(SpinWait *this)
{
    TRACE("(%p)\n", this);
    return 1;
}

/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$00@details@Concurrency@@QEAAXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QAEXI@Z */
/* ?_SetSpinCount@?$_SpinWait@$0A@@details@Concurrency@@QEAAXI@Z */
DEFINE_THISCALL_WRAPPER(SpinWait__SetSpinCount, 8)
void __thiscall SpinWait__SetSpinCount(SpinWait *this, unsigned int spin)
{
    TRACE("(%p %d)\n", this, spin);

    this->spin = spin;
    this->state = spin ? SPINWAIT_SPIN : SPINWAIT_YIELD;
}

/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$00@details@Concurrency@@IEAAXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IAEXXZ */
/* ?_Reset@?$_SpinWait@$0A@@details@Concurrency@@IEAAXXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__Reset, 4)
void __thiscall SpinWait__Reset(SpinWait *this)
{
    SpinWait__SetSpinCount(this, SpinCount__Value());
}

/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$00@details@Concurrency@@IEAA_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IAE_NXZ */
/* ?_ShouldSpinAgain@?$_SpinWait@$0A@@details@Concurrency@@IEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__ShouldSpinAgain, 4)
MSVCRT_bool __thiscall SpinWait__ShouldSpinAgain(SpinWait *this)
{
    TRACE("(%p)\n", this);

    this->spin--;
    return this->spin > 0;
}

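/* _SpinOnce drives the SPINWAIT_INIT -> SPINWAIT_SPIN -> SPINWAIT_YIELD -> SPINWAIT_DONE
 * progression: the spin counter is decremented until it reaches zero, then the "yield"
 * variant (unknown != 0, created by SpinWait_ctor_yield) calls yield_func once before
 * finishing, while the plain variant goes straight to DONE.  A FALSE return tells the
 * caller to stop calling _SpinOnce and block instead. */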
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$00@details@Concurrency@@QEAA_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QAE_NXZ */
/* ?_SpinOnce@?$_SpinWait@$0A@@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(SpinWait__SpinOnce, 4)
MSVCRT_bool __thiscall SpinWait__SpinOnce(SpinWait *this)
{
    switch(this->state) {
    case SPINWAIT_INIT:
        SpinWait__Reset(this);
        /* fall through */
    case SPINWAIT_SPIN:
        InterlockedDecrement((LONG*)&this->spin);
        if(!this->spin)
            this->state = this->unknown ? SPINWAIT_YIELD : SPINWAIT_DONE;
        return TRUE;
    case SPINWAIT_YIELD:
        this->state = SPINWAIT_DONE;
        this->yield_func();
        return TRUE;
    default:
        SpinWait__Reset(this);
        return FALSE;
    }
}

static HANDLE keyed_event;

/* keep in sync with msvcp90/msvcp90.h */
typedef struct cs_queue
{
    struct cs_queue *next;
#if _MSVCR_VER >= 110
    BOOL free;
    int unknown;
#endif
} cs_queue;

typedef struct
{
    ULONG_PTR unk_thread_id;
    cs_queue unk_active;
#if _MSVCR_VER >= 110
    void *unknown[2];
#else
    void *unknown[1];
#endif
    cs_queue *head;
    void *tail;
} critical_section;

/* ??0critical_section@Concurrency@@QAE@XZ */
/* ??0critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_ctor, 4)
critical_section* __thiscall critical_section_ctor(critical_section *this)
{
    TRACE("(%p)\n", this);

    if(!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if(InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    this->unk_thread_id = 0;
    this->head = this->tail = NULL;
    return this;
}

/* ??1critical_section@Concurrency@@QAE@XZ */
/* ??1critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_dtor, 4)
void __thiscall critical_section_dtor(critical_section *this)
{
    TRACE("(%p)\n", this);
}

static void __cdecl spin_wait_yield(void)
{
    Sleep(0);
}

static inline void spin_wait_for_next_cs(cs_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

static inline void cs_set_head(critical_section *cs, cs_queue *q)
{
    cs->unk_thread_id = GetCurrentThreadId();
    cs->unk_active.next = q->next;
    cs->head = &cs->unk_active;
}

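/* The critical_section fast path works on the 'tail' pointer: every locker atomically
 * swaps its own cs_queue node into 'tail'.  If the previous tail was non-NULL the lock
 * is contended, so the thread links itself behind the previous waiter and blocks on
 * the process-wide keyed event (keyed on its queue node).  The owner is represented by
 * the embedded 'unk_active' node; unlock wakes the next queued node, so ownership is
 * handed over in FIFO order. */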
static inline void cs_lock(critical_section *cs, cs_queue *q)
{
    cs_queue *last;

    if(cs->unk_thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    memset(q, 0, sizeof(*q));
    last = InterlockedExchangePointer(&cs->tail, q);
    if(last) {
        last->next = q;
        NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
    }

    cs_set_head(cs, q);
    if(InterlockedCompareExchangePointer(&cs->tail, &cs->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        cs->unk_active.next = q->next;
    }
}

/* ?lock@critical_section@Concurrency@@QAEXXZ */
/* ?lock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_lock, 4)
void __thiscall critical_section_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);
    cs_lock(this, &q);
}

/* ?try_lock@critical_section@Concurrency@@QAE_NXZ */
/* ?try_lock@critical_section@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock, 4)
MSVCRT_bool __thiscall critical_section_try_lock(critical_section *this)
{
    cs_queue q;

    TRACE("(%p)\n", this);

    if(this->unk_thread_id == GetCurrentThreadId())
        return FALSE;

    memset(&q, 0, sizeof(q));
    if(!InterlockedCompareExchangePointer(&this->tail, &q, NULL)) {
        cs_set_head(this, &q);
        if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, &q) != &q) {
            spin_wait_for_next_cs(&q);
            this->unk_active.next = q.next;
        }
        return TRUE;
    }
    return FALSE;
}

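/* Unlock clears the owner and tries to swing 'tail' from the active node back to NULL.
 * If that CAS fails, another thread has already queued itself, so the released owner
 * waits (briefly, via spin_wait_for_next_cs) for the successor link to appear and then
 * signals the successor's keyed-event wait.  With _MSVCR_VER >= 110 it may first have
 * to skip over heap-allocated nodes left behind by try_lock_for() calls that timed out
 * (their 'free' flag is already set), freeing them as it goes. */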
/* ?unlock@critical_section@Concurrency@@QAEXXZ */
/* ?unlock@critical_section@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(critical_section_unlock, 4)
void __thiscall critical_section_unlock(critical_section *this)
{
    TRACE("(%p)\n", this);

    this->unk_thread_id = 0;
    this->head = NULL;
    if(InterlockedCompareExchangePointer(&this->tail, NULL, &this->unk_active)
            == &this->unk_active) return;
    spin_wait_for_next_cs(&this->unk_active);

#if _MSVCR_VER >= 110
    while(1) {
        cs_queue *next;

        if(!InterlockedExchange(&this->unk_active.next->free, TRUE))
            break;

        next = this->unk_active.next;
        if(InterlockedCompareExchangePointer(&this->tail, NULL, next) == next) {
            HeapFree(GetProcessHeap(), 0, next);
            return;
        }
        spin_wait_for_next_cs(next);

        this->unk_active.next = next->next;
        HeapFree(GetProcessHeap(), 0, next);
    }
#endif

    NtReleaseKeyedEvent(keyed_event, this->unk_active.next, 0, NULL);
}

/* ?native_handle@critical_section@Concurrency@@QAEAAV12@XZ */
/* ?native_handle@critical_section@Concurrency@@QEAAAEAV12@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_native_handle, 4)
critical_section* __thiscall critical_section_native_handle(critical_section *this)
{
    TRACE("(%p)\n", this);
    return this;
}

#if _MSVCR_VER >= 110
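/* try_lock_for() queues a heap-allocated node so that it can abandon the wait on
 * timeout.  A timed-out waiter and unlock() race on the node's 'free' flag: if the
 * waiter flips it first, it returns FALSE and leaves the node in the queue for
 * unlock() to reclaim; if unlock() already flipped it (it is committed to waking this
 * node), the waiter consumes the pending keyed-event release and ends up owning the
 * lock even though the timeout expired. */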
/* ?try_lock_for@critical_section@Concurrency@@QAE_NI@Z */
/* ?try_lock_for@critical_section@Concurrency@@QEAA_NI@Z */
DEFINE_THISCALL_WRAPPER(critical_section_try_lock_for, 8)
MSVCRT_bool __thiscall critical_section_try_lock_for(
        critical_section *this, unsigned int timeout)
{
    cs_queue *q, *last;

    TRACE("(%p %d)\n", this, timeout);

    if(this->unk_thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    if(!(q = HeapAlloc(GetProcessHeap(), HEAP_ZERO_MEMORY, sizeof(*q))))
        return critical_section_try_lock(this);

    last = InterlockedExchangePointer(&this->tail, q);
    if(last) {
        LARGE_INTEGER to;
        NTSTATUS status;
        FILETIME ft;

        last->next = q;
        GetSystemTimeAsFileTime(&ft);
        to.QuadPart = ((LONGLONG)ft.dwHighDateTime<<32) +
            ft.dwLowDateTime + (LONGLONG)timeout*10000;
        status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
        if(status == STATUS_TIMEOUT) {
            if(!InterlockedExchange(&q->free, TRUE))
                return FALSE;
            /* A thread has signaled the event and is blocked waiting. */
            /* We need to catch the event to wake the thread.          */
            NtWaitForKeyedEvent(keyed_event, q, 0, NULL);
        }
    }

    cs_set_head(this, q);
    if(InterlockedCompareExchangePointer(&this->tail, &this->unk_active, q) != q) {
        spin_wait_for_next_cs(q);
        this->unk_active.next = q->next;
    }

    HeapFree(GetProcessHeap(), 0, q);
    return TRUE;
}
#endif

typedef struct
{
    critical_section *cs;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } lock;
} critical_section_scoped_lock;

/* ??0scoped_lock@critical_section@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@critical_section@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_ctor, 8)
critical_section_scoped_lock* __thiscall critical_section_scoped_lock_ctor(
        critical_section_scoped_lock *this, critical_section *cs)
{
    TRACE("(%p %p)\n", this, cs);
    this->cs = cs;
    cs_lock(this->cs, &this->lock.q);
    return this;
}

/* ??1scoped_lock@critical_section@Concurrency@@QAE@XZ */
/* ??1scoped_lock@critical_section@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(critical_section_scoped_lock_dtor, 4)
void __thiscall critical_section_scoped_lock_dtor(critical_section_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(this->cs);
}

typedef struct
{
    critical_section cs;
} _NonReentrantPPLLock;

/* ??0_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock_ctor, 4)
_NonReentrantPPLLock* __thiscall _NonReentrantPPLLock_ctor(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    return this;
}

/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_NonReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Acquire, 8)
void __thiscall _NonReentrantPPLLock__Acquire(_NonReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);
    cs_lock(&this->cs, q);
}

/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_NonReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Release, 4)
void __thiscall _NonReentrantPPLLock__Release(_NonReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);
    critical_section_unlock(&this->cs);
}

typedef struct
{
    _NonReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _NonReentrantPPLLock__Scoped_lock;

/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_ctor, 8)
_NonReentrantPPLLock__Scoped_lock* __thiscall _NonReentrantPPLLock__Scoped_lock_ctor(
        _NonReentrantPPLLock__Scoped_lock *this, _NonReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _NonReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_NonReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_NonReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _NonReentrantPPLLock__Scoped_lock_dtor(_NonReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _NonReentrantPPLLock__Release(this->lock);
}

typedef struct
{
    critical_section cs;
    LONG count;
    LONG owner;
} _ReentrantPPLLock;

/* ??0_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock_ctor, 4)
_ReentrantPPLLock* __thiscall _ReentrantPPLLock_ctor(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    critical_section_ctor(&this->cs);
    this->count = 0;
    this->owner = -1;
    return this;
}

/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QAEXPAX@Z */
/* ?_Acquire@_ReentrantPPLLock@details@Concurrency@@QEAAXPEAX@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Acquire, 8)
void __thiscall _ReentrantPPLLock__Acquire(_ReentrantPPLLock *this, cs_queue *q)
{
    TRACE("(%p %p)\n", this, q);

    if(this->owner == GetCurrentThreadId()) {
        this->count++;
        return;
    }

    cs_lock(&this->cs, q);
    this->count++;
    this->owner = GetCurrentThreadId();
}

/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantPPLLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Release, 4)
void __thiscall _ReentrantPPLLock__Release(_ReentrantPPLLock *this)
{
    TRACE("(%p)\n", this);

    this->count--;
    if(this->count)
        return;

    this->owner = -1;
    critical_section_unlock(&this->cs);
}

typedef struct
{
    _ReentrantPPLLock *lock;
    union {
        cs_queue q;
        struct {
            void *unknown[4];
            int unknown2[2];
        } unknown;
    } wait;
} _ReentrantPPLLock__Scoped_lock;

/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@AAV123@@Z */
/* ??0_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@AEAV123@@Z */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_ctor, 8)
_ReentrantPPLLock__Scoped_lock* __thiscall _ReentrantPPLLock__Scoped_lock_ctor(
        _ReentrantPPLLock__Scoped_lock *this, _ReentrantPPLLock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    _ReentrantPPLLock__Acquire(this->lock, &this->wait.q);
    return this;
}

/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QAE@XZ */
/* ??1_Scoped_lock@_ReentrantPPLLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantPPLLock__Scoped_lock_dtor, 4)
void __thiscall _ReentrantPPLLock__Scoped_lock_dtor(_ReentrantPPLLock__Scoped_lock *this)
{
    TRACE("(%p)\n", this);

    _ReentrantPPLLock__Release(this->lock);
}

/* ?_GetConcurrency@details@Concurrency@@YAIXZ */
unsigned int __cdecl _GetConcurrency(void)
{
    static unsigned int val = -1;

    TRACE("()\n");

    if(val == -1) {
        SYSTEM_INFO si;

        GetSystemInfo(&si);
        val = si.dwNumberOfProcessors;
    }

    return val;
}

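/* Concurrency::event implementation.  Each waiting call builds a thread_wait with one
 * thread_wait_entry per event and links the entries into the events' waiter lists
 * under the per-event critical_section.  'pending_waits' counts how many events still
 * have to fire (count for wait-for-all, 1 for wait-for-any); the thread that drops it
 * to zero stores the signaling event in 'signaled' and, if the waiter already went to
 * sleep, wakes it through the shared keyed event. */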
#define EVT_RUNNING (void*)1
#define EVT_WAITING NULL

struct thread_wait;
typedef struct thread_wait_entry
{
    struct thread_wait *wait;
    struct thread_wait_entry *next;
    struct thread_wait_entry *prev;
} thread_wait_entry;

typedef struct thread_wait
{
    void *signaled;
    int pending_waits;
    thread_wait_entry entries[1];
} thread_wait;

typedef struct
{
    thread_wait_entry *waiters;
    INT_PTR signaled;
    critical_section cs;
} event;

static inline PLARGE_INTEGER evt_timeout(PLARGE_INTEGER pTime, unsigned int timeout)
{
    if(timeout == COOPERATIVE_TIMEOUT_INFINITE) return NULL;
    pTime->QuadPart = (ULONGLONG)timeout * -10000;
    return pTime;
}

static void evt_add_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    entry->next = *head;
    entry->prev = NULL;
    if(*head) (*head)->prev = entry;
    *head = entry;
}

static void evt_remove_queue(thread_wait_entry **head, thread_wait_entry *entry)
{
    if(entry == *head)
        *head = entry->next;
    else if(entry->prev)
        entry->prev->next = entry->next;
    if(entry->next) entry->next->prev = entry->prev;
}

static MSVCRT_size_t evt_end_wait(thread_wait *wait, event **events, int count)
{
    MSVCRT_size_t i, ret = COOPERATIVE_WAIT_TIMEOUT;

    for(i = 0; i < count; i++) {
        critical_section_lock(&events[i]->cs);
        if(events[i] == wait->signaled) ret = i;
        evt_remove_queue(&events[i]->waiters, &wait->entries[i]);
        critical_section_unlock(&events[i]->cs);
    }

    return ret;
}

static inline int evt_transition(void **state, void *from, void *to)
{
    return InterlockedCompareExchangePointer(state, to, from) == from;
}

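/* evt_wait() registers the thread on every event, then tries to move 'signaled' from
 * EVT_RUNNING to EVT_WAITING before blocking on the keyed event.  If event_set() wins
 * that race it has already written the event pointer into 'signaled', so the waiter
 * skips the sleep.  After a timed-out keyed-event wait, the reverse transition
 * (EVT_WAITING -> EVT_RUNNING) decides whether a wake-up is already in flight and must
 * be consumed with a second NtWaitForKeyedEvent call. */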
static MSVCRT_size_t evt_wait(thread_wait *wait, event **events, int count, MSVCRT_bool wait_all, unsigned int timeout)
{
    int i;
    NTSTATUS status;
    LARGE_INTEGER ntto;

    wait->signaled = EVT_RUNNING;
    wait->pending_waits = wait_all ? count : 1;
    for(i = 0; i < count; i++) {
        wait->entries[i].wait = wait;

        critical_section_lock(&events[i]->cs);
        evt_add_queue(&events[i]->waiters, &wait->entries[i]);
        if(events[i]->signaled) {
            if(!InterlockedDecrement(&wait->pending_waits)) {
                wait->signaled = events[i];
                critical_section_unlock(&events[i]->cs);

                return evt_end_wait(wait, events, i+1);
            }
        }
        critical_section_unlock(&events[i]->cs);
    }

    if(!timeout)
        return evt_end_wait(wait, events, count);

    if(!evt_transition(&wait->signaled, EVT_RUNNING, EVT_WAITING))
        return evt_end_wait(wait, events, count);

    status = NtWaitForKeyedEvent(keyed_event, wait, 0, evt_timeout(&ntto, timeout));

    if(status && !evt_transition(&wait->signaled, EVT_WAITING, EVT_RUNNING))
        NtWaitForKeyedEvent(keyed_event, wait, 0, NULL);

    return evt_end_wait(wait, events, count);
}

/* ??0event@Concurrency@@QAE@XZ */
/* ??0event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_ctor, 4)
event* __thiscall event_ctor(event *this)
{
    TRACE("(%p)\n", this);

    this->waiters = NULL;
    this->signaled = FALSE;
    critical_section_ctor(&this->cs);

    return this;
}

/* ??1event@Concurrency@@QAE@XZ */
/* ??1event@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(event_dtor, 4)
void __thiscall event_dtor(event *this)
{
    TRACE("(%p)\n", this);
    critical_section_dtor(&this->cs);
    if(this->waiters)
        ERR("there's a wait on destroyed event\n");
}

/* ?reset@event@Concurrency@@QAEXXZ */
/* ?reset@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_reset, 4)
void __thiscall event_reset(event *this)
{
    thread_wait_entry *entry;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(this->signaled) {
        this->signaled = FALSE;
        for(entry=this->waiters; entry; entry = entry->next)
            InterlockedIncrement(&entry->wait->pending_waits);
    }
    critical_section_unlock(&this->cs);
}

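/* event_set() flips 'signaled' under the event lock and collects the waiters whose
 * pending_waits dropped to zero (and that were already asleep, i.e. their wait state
 * was EVT_WAITING) on a private wakeup list.  The keyed-event releases are then done
 * after the lock is dropped, so a woken thread never contends on this event's
 * critical_section while tearing down its wait. */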
/* ?set@event@Concurrency@@QAEXXZ */
/* ?set@event@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(event_set, 4)
void __thiscall event_set(event *this)
{
    thread_wait_entry *wakeup = NULL;
    thread_wait_entry *entry, *next;

    TRACE("(%p)\n", this);

    critical_section_lock(&this->cs);
    if(!this->signaled) {
        this->signaled = TRUE;
        for(entry=this->waiters; entry; entry=next) {
            next = entry->next;
            if(!InterlockedDecrement(&entry->wait->pending_waits)) {
                if(InterlockedExchangePointer(&entry->wait->signaled, this) == EVT_WAITING) {
                    evt_remove_queue(&this->waiters, entry);
                    evt_add_queue(&wakeup, entry);
                }
            }
        }
    }
    critical_section_unlock(&this->cs);

    for(entry=wakeup; entry; entry=next) {
        next = entry->next;
        entry->next = entry->prev = NULL;
        NtReleaseKeyedEvent(keyed_event, entry->wait, 0, NULL);
    }
}

/* ?wait@event@Concurrency@@QAEII@Z */
/* ?wait@event@Concurrency@@QEAA_KI@Z */
DEFINE_THISCALL_WRAPPER(event_wait, 8)
MSVCRT_size_t __thiscall event_wait(event *this, unsigned int timeout)
{
    thread_wait wait;
    MSVCRT_size_t signaled;

    TRACE("(%p %u)\n", this, timeout);

    critical_section_lock(&this->cs);
    signaled = this->signaled;
    critical_section_unlock(&this->cs);

    if(!timeout) return signaled ? 0 : COOPERATIVE_WAIT_TIMEOUT;
    return signaled ? 0 : evt_wait(&wait, &this, 1, FALSE, timeout);
}

/* ?wait_for_multiple@event@Concurrency@@SAIPAPAV12@I_NI@Z */
/* ?wait_for_multiple@event@Concurrency@@SA_KPEAPEAV12@_K_NI@Z */
int __cdecl event_wait_for_multiple(event **events, MSVCRT_size_t count, MSVCRT_bool wait_all, unsigned int timeout)
{
    thread_wait *wait;
    MSVCRT_size_t ret;

    TRACE("(%p %ld %d %u)\n", events, count, wait_all, timeout);

    if(count == 0)
        return 0;

    wait = heap_alloc(FIELD_OFFSET(thread_wait, entries[count]));
    if(!wait)
        throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
    ret = evt_wait(wait, events, count, wait_all, timeout);
    heap_free(wait);

    return ret;
}
#endif

#if _MSVCR_VER >= 110
typedef struct cv_queue {
    struct cv_queue *next;
    BOOL expired;
} cv_queue;

typedef struct {
    /* cv_queue structure is not binary compatible */
    cv_queue *queue;
    critical_section lock;
} _Condition_variable;

/* ??0_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??0_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_ctor, 4)
_Condition_variable* __thiscall _Condition_variable_ctor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    this->queue = NULL;
    critical_section_ctor(&this->lock);
    return this;
}

/* ??1_Condition_variable@details@Concurrency@@QAE@XZ */
/* ??1_Condition_variable@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_dtor, 4)
void __thiscall _Condition_variable_dtor(_Condition_variable *this)
{
    TRACE("(%p)\n", this);

    while(this->queue) {
        cv_queue *next = this->queue->next;
        if(!this->queue->expired)
            ERR("there's an active wait\n");
        HeapFree(GetProcessHeap(), 0, this->queue);
        this->queue = next;
    }
    critical_section_dtor(&this->lock);
}

/* ?wait@_Condition_variable@details@Concurrency@@QAEXAAVcritical_section@3@@Z */
/* ?wait@_Condition_variable@details@Concurrency@@QEAAXAEAVcritical_section@3@@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait, 8)
void __thiscall _Condition_variable_wait(_Condition_variable *this, critical_section *cs)
{
    cv_queue q;

    TRACE("(%p, %p)\n", this, cs);

    critical_section_lock(&this->lock);
    q.next = this->queue;
    q.expired = FALSE;
    this->queue = &q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);
    NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    critical_section_lock(cs);
}

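/* wait_for() uses a heap-allocated queue node because the waiter may give up before it
 * is notified; the node's 'expired' flag is the rendezvous.  On timeout the waiter
 * flips the flag: if a notifier already flipped it, a keyed-event release is on its
 * way and must be consumed before the node can be freed, and the wait then counts as
 * satisfied.  Conversely, notify_one()/notify_all() free nodes whose waiters have
 * already expired instead of signalling them. */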
/* ?wait_for@_Condition_variable@details@Concurrency@@QAE_NAAVcritical_section@3@I@Z */
/* ?wait_for@_Condition_variable@details@Concurrency@@QEAA_NAEAVcritical_section@3@I@Z */
DEFINE_THISCALL_WRAPPER(_Condition_variable_wait_for, 12)
MSVCRT_bool __thiscall _Condition_variable_wait_for(_Condition_variable *this,
        critical_section *cs, unsigned int timeout)
{
    LARGE_INTEGER to;
    NTSTATUS status;
    FILETIME ft;
    cv_queue *q;

    TRACE("(%p %p %d)\n", this, cs, timeout);

    if(!(q = HeapAlloc(GetProcessHeap(), 0, sizeof(cv_queue)))) {
        throw_exception(EXCEPTION_BAD_ALLOC, 0, "bad allocation");
    }

    critical_section_lock(&this->lock);
    q->next = this->queue;
    q->expired = FALSE;
    this->queue = q;
    critical_section_unlock(&this->lock);

    critical_section_unlock(cs);

    GetSystemTimeAsFileTime(&ft);
    to.QuadPart = ((LONGLONG)ft.dwHighDateTime << 32) +
        ft.dwLowDateTime + (LONGLONG)timeout * 10000;
    status = NtWaitForKeyedEvent(keyed_event, q, 0, &to);
    if(status == STATUS_TIMEOUT) {
        if(!InterlockedExchange(&q->expired, TRUE)) {
            critical_section_lock(cs);
            return FALSE;
        }
        else
            NtWaitForKeyedEvent(keyed_event, q, 0, 0);
    }

    HeapFree(GetProcessHeap(), 0, q);
    critical_section_lock(cs);
    return TRUE;
}

/* ?notify_one@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_one@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_one, 4)
void __thiscall _Condition_variable_notify_one(_Condition_variable *this)
{
    cv_queue *node;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    while(1) {
        critical_section_lock(&this->lock);
        node = this->queue;
        if(!node) {
            critical_section_unlock(&this->lock);
            return;
        }
        this->queue = node->next;
        critical_section_unlock(&this->lock);

        if(!InterlockedExchange(&node->expired, TRUE)) {
            NtReleaseKeyedEvent(keyed_event, node, 0, NULL);
            return;
        } else {
            HeapFree(GetProcessHeap(), 0, node);
        }
    }
}

/* ?notify_all@_Condition_variable@details@Concurrency@@QAEXXZ */
/* ?notify_all@_Condition_variable@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_Condition_variable_notify_all, 4)
void __thiscall _Condition_variable_notify_all(_Condition_variable *this)
{
    cv_queue *ptr;

    TRACE("(%p)\n", this);

    if(!this->queue)
        return;

    critical_section_lock(&this->lock);
    ptr = this->queue;
    this->queue = NULL;
    critical_section_unlock(&this->lock);

    while(ptr) {
        cv_queue *next = ptr->next;

        if(!InterlockedExchange(&ptr->expired, TRUE))
            NtReleaseKeyedEvent(keyed_event, ptr, 0, NULL);
        else
            HeapFree(GetProcessHeap(), 0, ptr);
        ptr = next;
    }
}
#endif

#if _MSVCR_VER >= 100
typedef struct rwl_queue
{
    struct rwl_queue *next;
} rwl_queue;

#define WRITER_WAITING 0x80000000
/* FIXME: reader_writer_lock structure is not binary compatible
 * it can't exceed 28/56 bytes */
typedef struct
{
    LONG count;
    LONG thread_id;
    rwl_queue active;
    rwl_queue *writer_head;
    rwl_queue *writer_tail;
    rwl_queue *reader_head;
} reader_writer_lock;

/* ??0reader_writer_lock@Concurrency@@QAE@XZ */
/* ??0reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_ctor, 4)
reader_writer_lock* __thiscall reader_writer_lock_ctor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (!keyed_event) {
        HANDLE event;

        NtCreateKeyedEvent(&event, GENERIC_READ|GENERIC_WRITE, NULL, 0);
        if (InterlockedCompareExchangePointer(&keyed_event, event, NULL) != NULL)
            NtClose(event);
    }

    memset(this, 0, sizeof(*this));
    return this;
}

/* ??1reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_dtor, 4)
void __thiscall reader_writer_lock_dtor(reader_writer_lock *this)
{
    TRACE("(%p)\n", this);

    if (this->thread_id != 0 || this->count)
        WARN("destroying locked reader_writer_lock\n");
}

static inline void spin_wait_for_next_rwl(rwl_queue *q)
{
    SpinWait sw;

    if(q->next) return;

    SpinWait_ctor(&sw, &spin_wait_yield);
    SpinWait__Reset(&sw);
    while(!q->next)
        SpinWait__SpinOnce(&sw);
    SpinWait_dtor(&sw);
}

/* Remove when proper InterlockedOr implementation is added to wine */
static LONG InterlockedOr(LONG *d, LONG v)
{
    LONG l;
    while (~(l = *d) & v)
        if (InterlockedCompareExchange(d, l|v, l) == l) break;
    return l;
}

static LONG InterlockedAnd(LONG *d, LONG v)
{
    LONG l = *d, old;
    while ((l & v) != l) {
        if((old = InterlockedCompareExchange(d, l&v, l)) == l) break;
        l = old;
    }
    return l;
}

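/* Writer locking: writers queue on 'writer_tail' the same way critical_section waiters
 * queue on 'tail', and block on the keyed event until their predecessor hands the lock
 * over.  The low 31 bits of 'count' hold the number of active readers and the top bit
 * (WRITER_WAITING) is set while a writer is queued, which stops new readers from
 * taking the fast path. */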
/* ?lock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock, 4)
void __thiscall reader_writer_lock_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL }, *last;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked");

    last = InterlockedExchangePointer((void**)&this->writer_tail, &q);
    if (last) {
        last->next = &q;
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    } else {
        this->writer_head = &q;
        if (InterlockedOr(&this->count, WRITER_WAITING))
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }

    this->thread_id = GetCurrentThreadId();
    this->writer_head = &this->active;
    this->active.next = NULL;
    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
        spin_wait_for_next_rwl(&q);
        this->active.next = q.next;
    }
}

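/* Reader locking: readers push their node onto the 'reader_head' lock-free stack.
 * The reader that finds the stack empty acts as leader: it claims a reader slot in
 * 'count' (or, if a writer holds or awaits the lock, blocks until the writer's unlock
 * wakes the queued readers) and then drains the stack, bumping 'count' and waking each
 * reader that piled up behind it.  Non-leader readers simply block on the keyed event
 * until they are woken with a reader slot already reserved for them. */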
/* ?lock_read@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?lock_read@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_lock_read, 4)
void __thiscall reader_writer_lock_lock_read(reader_writer_lock *this)
{
    rwl_queue q;

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        throw_exception(EXCEPTION_IMPROPER_LOCK, 0, "Already locked as writer");

    do {
        q.next = this->reader_head;
    } while(InterlockedCompareExchangePointer((void**)&this->reader_head, &q, q.next) != q.next);

    if (!q.next) {
        rwl_queue *head;
        LONG count;

        while (!((count = this->count) & WRITER_WAITING))
            if (InterlockedCompareExchange(&this->count, count+1, count) == count) break;

        if (count & WRITER_WAITING)
            NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);

        head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
        while(head && head != &q) {
            rwl_queue *next = head->next;
            InterlockedIncrement(&this->count);
            NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
            head = next;
        }
    } else {
        NtWaitForKeyedEvent(keyed_event, &q, 0, NULL);
    }
}

/* ?try_lock@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock, 4)
MSVCRT_bool __thiscall reader_writer_lock_try_lock(reader_writer_lock *this)
{
    rwl_queue q = { NULL };

    TRACE("(%p)\n", this);

    if (this->thread_id == GetCurrentThreadId())
        return FALSE;

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &q, NULL))
        return FALSE;
    this->writer_head = &q;
    if (!InterlockedCompareExchange(&this->count, WRITER_WAITING, 0)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = NULL;
        if (InterlockedCompareExchangePointer((void**)&this->writer_tail, &this->active, &q) != &q) {
            spin_wait_for_next_rwl(&q);
            this->active.next = q.next;
        }
        return TRUE;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, &q) == &q)
        return FALSE;
    spin_wait_for_next_rwl(&q);
    this->writer_head = q.next;
    if (!InterlockedOr(&this->count, WRITER_WAITING)) {
        this->thread_id = GetCurrentThreadId();
        this->writer_head = &this->active;
        this->active.next = q.next;
        return TRUE;
    }
    return FALSE;
}

/* ?try_lock_read@reader_writer_lock@Concurrency@@QAE_NXZ */
/* ?try_lock_read@reader_writer_lock@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_try_lock_read, 4)
MSVCRT_bool __thiscall reader_writer_lock_try_lock_read(reader_writer_lock *this)
{
    LONG count;

    TRACE("(%p)\n", this);

    while (!((count = this->count) & WRITER_WAITING))
        if (InterlockedCompareExchange(&this->count, count+1, count) == count) return TRUE;
    return FALSE;
}

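/* unlock() serves both roles.  A reader unlock just drops its slot in 'count'; the
 * last reader to leave while a writer is queued (count reaches WRITER_WAITING) wakes
 * that writer.  A writer unlock either hands the lock directly to the next queued
 * writer or, if none is queued, clears WRITER_WAITING and releases every reader that
 * accumulated on 'reader_head' while it held the lock. */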
/* ?unlock@reader_writer_lock@Concurrency@@QAEXXZ */
/* ?unlock@reader_writer_lock@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_unlock, 4)
void __thiscall reader_writer_lock_unlock(reader_writer_lock *this)
{
    LONG count;
    rwl_queue *head, *next;

    TRACE("(%p)\n", this);

    if ((count = this->count) & ~WRITER_WAITING) {
        count = InterlockedDecrement(&this->count);
        if (count != WRITER_WAITING)
            return;
        NtReleaseKeyedEvent(keyed_event, this->writer_head, 0, NULL);
        return;
    }

    this->thread_id = 0;
    next = this->writer_head->next;
    if (next) {
        NtReleaseKeyedEvent(keyed_event, next, 0, NULL);
        return;
    }
    InterlockedAnd(&this->count, ~WRITER_WAITING);
    head = InterlockedExchangePointer((void**)&this->reader_head, NULL);
    while (head) {
        next = head->next;
        InterlockedIncrement(&this->count);
        NtReleaseKeyedEvent(keyed_event, head, 0, NULL);
        head = next;
    }

    if (InterlockedCompareExchangePointer((void**)&this->writer_tail, NULL, this->writer_head) == this->writer_head)
        return;
    InterlockedOr(&this->count, WRITER_WAITING);
}

typedef struct {
    reader_writer_lock *lock;
} reader_writer_lock_scoped_lock;

/* ??0scoped_lock@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock(lock);
    return this;
}

/* ??1scoped_lock@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QAE@AAV12@@Z */
/* ??0scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@AEAV12@@Z */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_ctor, 8)
reader_writer_lock_scoped_lock* __thiscall reader_writer_lock_scoped_lock_read_ctor(
        reader_writer_lock_scoped_lock *this, reader_writer_lock *lock)
{
    TRACE("(%p %p)\n", this, lock);

    this->lock = lock;
    reader_writer_lock_lock_read(lock);
    return this;
}

/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QAE@XZ */
/* ??1scoped_lock_read@reader_writer_lock@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(reader_writer_lock_scoped_lock_read_dtor, 4)
void __thiscall reader_writer_lock_scoped_lock_read_dtor(reader_writer_lock_scoped_lock *this)
{
    TRACE("(%p)\n", this);
    reader_writer_lock_unlock(this->lock);
}

typedef struct {
    CRITICAL_SECTION cs;
} _ReentrantBlockingLock;

/* ??0_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??0_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_ctor, 4)
_ReentrantBlockingLock* __thiscall _ReentrantBlockingLock_ctor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    InitializeCriticalSection(&this->cs);
    this->cs.DebugInfo->Spare[0] = (DWORD_PTR)(__FILE__ ": _ReentrantBlockingLock");
    return this;
}

/* ??1_ReentrantBlockingLock@details@Concurrency@@QAE@XZ */
/* ??1_ReentrantBlockingLock@details@Concurrency@@QEAA@XZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock_dtor, 4)
void __thiscall _ReentrantBlockingLock_dtor(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);

    this->cs.DebugInfo->Spare[0] = 0;
    DeleteCriticalSection(&this->cs);
}

/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Acquire@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Acquire, 4)
void __thiscall _ReentrantBlockingLock__Acquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    EnterCriticalSection(&this->cs);
}

/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QAEXXZ */
/* ?_Release@_ReentrantBlockingLock@details@Concurrency@@QEAAXXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__Release, 4)
void __thiscall _ReentrantBlockingLock__Release(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    LeaveCriticalSection(&this->cs);
}

/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QAE_NXZ */
/* ?_TryAcquire@_ReentrantBlockingLock@details@Concurrency@@QEAA_NXZ */
DEFINE_THISCALL_WRAPPER(_ReentrantBlockingLock__TryAcquire, 4)
MSVCRT_bool __thiscall _ReentrantBlockingLock__TryAcquire(_ReentrantBlockingLock *this)
{
    TRACE("(%p)\n", this);
    return TryEnterCriticalSection(&this->cs);
}

/* ?wait@Concurrency@@YAXI@Z */
void __cdecl Concurrency_wait(unsigned int time)
{
    static int once;

    if (!once++) FIXME("(%d) stub!\n", time);

    Sleep(time);
}
#endif

#if _MSVCR_VER == 110
static LONG shared_ptr_lock;

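/* Simple global spin lock exported as _Lock_shared_ptr_spin_lock()/
 * _Unlock_shared_ptr_spin_lock(), apparently used to serialize shared_ptr bookkeeping
 * (per the exported names): spin on an interlocked compare-exchange and yield the time
 * slice with Sleep(0) after every 1000 failed attempts. */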
void __cdecl _Lock_shared_ptr_spin_lock(void)
{
    LONG l = 0;

    while(InterlockedCompareExchange(&shared_ptr_lock, 1, 0) != 0) {
        if(l++ == 1000) {
            Sleep(0);
            l = 0;
        }
    }
}

void __cdecl _Unlock_shared_ptr_spin_lock(void)
{
    shared_ptr_lock = 0;
}
#endif

/**********************************************************************
 *     msvcrt_free_locks (internal)
 *
 * Uninitialize all mt locks. Assume that neither _lock or _unlock will
 * be called once we're calling this routine (ie _LOCKTAB_LOCK can be deleted)
 *
 */
void msvcrt_free_locks(void)
{
    int i;

    TRACE( ": uninitializing all mtlocks\n" );

    /* Uninitialize the table */
    for( i=0; i < _TOTAL_LOCKS; i++ )
    {
        if( lock_table[ i ].bInit )
        {
            msvcrt_uninitialize_mlock( i );
        }
    }

#if _MSVCR_VER >= 100
    if(keyed_event)
        NtClose(keyed_event);
#endif
}