#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;

static int native_mutex_lock(rb_nativethread_lock_t *lock);
static int native_mutex_unlock(rb_nativethread_lock_t *lock);
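/* Report the Win32 error from GetLastError() for the named function and abort via rb_bug(). */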
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR)&lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR)&lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
}
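/*
 * The GVL and the USE_WIN32_MUTEX locks below are kernel mutex HANDLEs.
 * w32_mutex_lock() loops on w32_wait_events(); a result of WAIT_OBJECT_0 + 1
 * (the slot reserved for a thread's interrupt event) is reported as EINTR.
 */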
static int
w32_mutex_lock(HANDLE lock)
{
    DWORD result;

    while (1) {
        result = w32_wait_events(&lock, 1, INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:
            /* acquired the mutex */
            return 0;
          case WAIT_OBJECT_0 + 1:
            /* the interrupt-event slot fired */
            errno = EINTR;
            return 0;
          case WAIT_TIMEOUT:
            break;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
            break;
          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
            break;
        }
    }
    return 0;
}
static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) {
        w32_error("native_mutex_initialize");
    }
    return lock;
}
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
{
    w32_mutex_lock(vm->gvl.lock);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
}

static void
gvl_release(rb_vm_t *vm)
{
    ReleaseMutex(vm->gvl.lock);
}

static void
gvl_yield(rb_vm_t *vm, rb_thread_t *th)
{
    gvl_release(vm);
    native_thread_yield();
    gvl_acquire(vm, th);
}
static void
gvl_init(rb_vm_t *vm)
{
    if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
    vm->gvl.lock = w32_mutex_create();
}
static void
gvl_destroy(rb_vm_t *vm)
{
    if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
    CloseHandle(vm->gvl.lock);
}
static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

static int
ruby_thread_set_native(rb_thread_t *th)
{
    return TlsSetValue(ruby_native_thread_key, th);
}
void
Init_native_thread(void)
{
    rb_thread_t *th = GET_THREAD();

    ruby_native_thread_key = TlsAlloc();
    ruby_thread_set_native(th);
    DuplicateHandle(GetCurrentProcess(),
                    GetCurrentThread(),
                    GetCurrentProcess(),
                    &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);

    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
                 th, th->thread_id,
                 th->native_thread_data.interrupt_event);
}
static DWORD
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);

            targets[count++] = intr;
            thread_debug("  * handle: %p (count: %d, intr)\n", intr, count);
        }
        else if (intr == th->native_thread_data.interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    thread_debug("  WaitForMultipleObjects start (count: %d)\n", count);
    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug("  WaitForMultipleObjects end (ret: %lu)\n", ret);

    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == (DWORD)-1 && THREAD_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            thread_debug("  * error handle %d - %s\n", i,
                         GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

int
rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
{
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());
}

int
rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
{
    int ret;

    BLOCKING_REGION(ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
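/*
 * Illustrative sketch (not from thread_win32.c): how caller code on a Ruby
 * thread might block on a single event while staying interruptible by Ruby.
 * The event handle hEvent is hypothetical.
 */
#if 0
static void
example_wait_for_event(HANDLE hEvent)
{
    /* releases the GVL around the wait on hEvent plus the interrupt event */
    if (rb_w32_wait_events(&hEvent, 1, INFINITE) == WAIT_OBJECT_0) {
        /* hEvent was signaled */
    }
}
#endif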
static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}
#if defined(_MSC_VER) && RUBY_MSVCRT_VERSION >= 80
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif
static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val, CREATE_SUSPENDED, 0);
}
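/* Threads are created CREATE_SUSPENDED and started later via w32_resume_thread(). */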
int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}

int WINAPI
rb_w32_Sleep(unsigned long msec)
{
    int ret;

    BLOCKING_REGION(ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);
    return ret;
}
static void
native_sleep(rb_thread_t *th, struct timeval *tv)
{
    const volatile DWORD msec = (tv) ?
        (DWORD)(tv->tv_sec * 1000 + tv->tv_usec / 1000) : INFINITE;
    DWORD ret;

    /* th->unblock.func is set to ubf_handle for the duration of the wait */
    ret = w32_wait_events(0, 0, msec, th);
    thread_debug("native_sleep done (%lu)\n", ret);
}
static int
native_mutex_lock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex);
    return 0;
#else
    EnterCriticalSection(&lock->crit);
    return 0;
#endif
}

static int
native_mutex_unlock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    return ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
    return 0;
#endif
}
static int
native_mutex_trylock(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    int result;
    thread_debug("native_mutex_trylock: %p\n", lock->mutex);
    result = w32_wait_events(&lock->mutex, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", result);
    switch (result) {
      case WAIT_OBJECT_0:
        return 0;
      case WAIT_TIMEOUT:
        return EBUSY;
    }
    return EINVAL;
#else
    return TryEnterCriticalSection(&lock->crit) == 0 ? EBUSY : 0;
#endif
}
static void
native_mutex_initialize(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();
#else
    InitializeCriticalSection(&lock->crit);
#endif
}

static void
native_mutex_destroy(rb_nativethread_lock_t *lock)
{
#if USE_WIN32_MUTEX
    w32_close_handle(lock->mutex);
#else
    DeleteCriticalSection(&lock->crit);
#endif
}
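/*
 * Condition variables are built by hand: the rb_nativethread_cond_t acts as
 * the head of a circular doubly linked list of cond_event_entry waiters, each
 * carrying its own auto-reset event.  Signal/broadcast unlink entries and set
 * their events; the list itself is protected by the associated mutex.
 */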
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};
static void
native_cond_signal(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        SetEvent(e->event);
    }
}
static void
native_cond_broadcast(rb_nativethread_cond_t *cond)
{
    /* cond is guarded by mutex */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;

        SetEvent(e->event);

        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;

        e = next;
    }
}
static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex,
                         unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* cond is guarded by mutex */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    native_mutex_unlock(mutex);
    {
        r = WaitForSingleObject(entry.event, msec);
        if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
            rb_bug("native_cond_wait: WaitForSingleObject returns %lu", r);
        }
    }
    native_mutex_lock(mutex);

    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}
static int
native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
{
    return native_cond_timedwait_ms(cond, mutex, INFINITE);
}
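/*
 * Illustrative sketch (not from thread_win32.c): a typical wait loop over the
 * hand-rolled condition variable above.  The shared flag "ready" is a
 * hypothetical predicate.
 */
#if 0
static int ready;

static void
example_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *lock)
{
    native_mutex_lock(lock);
    while (!ready) {
        /* unlocks the mutex, blocks on this waiter's event, then relocks */
        native_cond_wait(cond, lock);
    }
    native_mutex_unlock(lock);
}
#endif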
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex,
                      const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
static void
native_cond_initialize(rb_nativethread_cond_t *cond, int flags)
{
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
}
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

static void
native_thread_init_stack(rb_thread_t *th)
{
    MEMORY_BASIC_INFORMATION mi;
    char *base, *end;
    DWORD size, space;

    CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024;
    th->ec->machine.stack_start = (VALUE *)end - 1;
    th->ec->machine.stack_maxsize = size - space;
}
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif

static void
native_thread_destroy(rb_thread_t *th)
{
    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
    thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
    w32_close_handle(intr);
}
static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->thread_id;

    native_thread_init_stack(th);
    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    /* run */
    thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
                 th->thread_id, th->native_thread_data.interrupt_event);

    thread_start_func_2(th, th->ec->machine.stack_start, rb_ia64_bsp());

    w32_close_handle(thread_id);
    thread_debug("thread deleted (th: %p)\n", th);
    return 0;
}
static int
native_thread_create(rb_thread_t *th)
{
    size_t stack_size = 4 * 1024; /* 4KB */
    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);

    if ((th->thread_id) == 0) {
        return thread_errno;
    }

    w32_resume_thread(th->thread_id);
    return 0;
}
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}
#if USE_NATIVE_THREAD_PRIORITY

static void
native_thread_apply_priority(rb_thread_t *th)
{
    int priority = th->priority;
    if (th->priority > 0) {
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    }
    else if (th->priority < 0) {
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    }
    else {
        priority = THREAD_PRIORITY_NORMAL;
    }
    SetThreadPriority(th->thread_id, priority);
}

#endif /* USE_NATIVE_THREAD_PRIORITY */
int
rb_w32_check_interrupt(rb_thread_t *th)
{
    return w32_wait_events(0, 0, 0, th);
}
static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_handle: %p\n", th);

    if (!SetEvent(th->native_thread_data.interrupt_event)) {
        w32_error("ubf_handle");
    }
}
int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
int rb_w32_set_thread_description_str(HANDLE th, VALUE name);
#define native_set_another_thread_name rb_w32_set_thread_description_str

static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    thread_debug("timer_thread\n");
    while (WaitForSingleObject(timer_thread.lock, TIME_QUANTUM_USEC/1000) ==
           WAIT_TIMEOUT) {
        timer_thread_function(dummy);
    }
    thread_debug("timer killed\n");
    return 0;
}
static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}
static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}
static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    return 0;
}
#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_thread_raised_set(GET_THREAD(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif
#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
        rb_thread_t *th = GET_THREAD();
        if (!rb_thread_raised_p(th, RAISED_STACKOVERFLOW)) {
            rb_thread_raised_set(th, RAISED_STACKOVERFLOW);
            rb_exc_raise(sysstack_error);
        }
    }
}
#endif
rb_nativethread_id_t
rb_nativethread_self(void)
{
    return GetCurrentThread();
}

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */