#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

#define THREAD_DEBUG 0

static VALUE rb_cThreadShield;
static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;
static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
static double timeofday(void);
static void rb_check_deadlock(rb_vm_t *vm);
static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th);

#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
static volatile int system_working = 1;
#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef __ia64
#define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)          \
    do{(th)->machine.register_stack_end = rb_ia64_bsp();}while(0)
#else
#define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
#endif

#define RB_GC_SAVE_MACHINE_CONTEXT(th)                          \
    do {                                                        \
        FLUSH_REGISTER_WINDOWS;                                 \
        RB_GC_SAVE_MACHINE_REGISTER_STACK(th);                  \
        setjmp((th)->ec.machine.regs);                          \
        SET_MACHINE_STACK_END(&(th)->ec.machine.stack_end);     \
    } while (0)

#define GVL_UNLOCK_BEGIN() do { \
  rb_thread_t *_th_stored = GET_THREAD(); \
  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
  gvl_release(_th_stored->vm);

#define GVL_UNLOCK_END() \
  gvl_acquire(_th_stored->vm, _th_stored); \
  rb_thread_set_current(_th_stored); \
} while(0)

#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#elif defined(__GNUC__)
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#else
#define only_if_constant(expr, notconst) notconst
#endif

#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
    rb_thread_t *__th = GET_THREAD(); \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        exec; \
        blocking_region_end(__th, &__region); \
    }; \
} while(0)

#define RUBY_VM_CHECK_INTS_BLOCKING(th) vm_check_ints_blocking(th)
static inline void
vm_check_ints_blocking(rb_thread_t *th)
{
    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        /* ... */
    }
    /* ... */
}
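/*
 * Editorial note: GVL_UNLOCK_BEGIN/END deliberately form one unbalanced
 * do/while pair, so a caller brackets GVL-free code between them:
 *
 *   GVL_UNLOCK_BEGIN();
 *   // machine context saved, GVL released: safe to block here
 *   GVL_UNLOCK_END();
 *
 * BLOCKING_REGION is the richer variant: it also installs an unblock
 * function (ubf) so the blocked `exec` can be interrupted, and when
 * fail_if_interrupted is set it skips `exec` entirely if an interrupt is
 * already pending.
 */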
static int
vm_living_thread_num(rb_vm_t *vm)
{
    /* ... */
}
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
#define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
#define POSITION_FORMAT "%s:%d:"
#define POSITION_ARGS ,file, line
#else
void rb_thread_debug(const char *fmt, ...);
#define thread_debug rb_thread_debug
#define POSITION_FORMAT
#define POSITION_ARGS
#endif

# ifdef NON_SCALAR_THREAD_ID
#define fill_thread_id_string ruby_fill_thread_id_string
const char *
ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
{
    /* ... */
    for (i = 0; i < sizeof(thid); i++) {
# ifdef LITTLE_ENDIAN
        size_t j = sizeof(thid) - i - 1;
# else
        size_t j = i;
# endif
        unsigned char c = (unsigned char)((char *)&thid)[j];
        buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
        buf[3 + i * 2] = ruby_digitmap[c & 0xf];
    }
    /* ... */
}
# define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
# define thread_id_str(th) ((th)->thread_id_string)
# define PRI_THREAD_ID "s"
# endif

# if THREAD_DEBUG < 0
static int rb_thread_debug_enabled;
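/*
 * Editorial example: for an 8-byte pthread_t whose value is 0x7f5a12345678,
 * the loop above produces the string "0x00007f5a12345678" in buf; the
 * LITTLE_ENDIAN index flip makes the most significant byte print first.
 */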
static VALUE
rb_thread_s_debug(void)
{
    return INT2NUM(rb_thread_debug_enabled);
}

static VALUE
rb_thread_s_debug_set(VALUE self, VALUE val)
{
    rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
    return val;
}
# else
# define rb_thread_debug_enabled THREAD_DEBUG
# endif
#else
#define thread_debug if(0)printf
#endif

#ifndef fill_thread_id_str
# define fill_thread_id_string(thid, buf) (void *)(thid)
# define fill_thread_id_str(th) (void)0
# define thread_id_str(th) ((void *)(th)->thread_id)
# define PRI_THREAD_ID "p"
#endif

#ifndef __ia64
#define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
#endif
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
                                        VALUE *register_stack_start));
static void timer_thread_function(void *);

#if   defined(_WIN32)
/* ... */

#define DEBUG_OUT() \
  WaitForSingleObject(&debug_mutex, INFINITE); \
  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
  fflush(stdout); \
  ReleaseMutex(&debug_mutex);

#elif defined(HAVE_PTHREAD_H)
/* ... */

#define DEBUG_OUT() \
  pthread_mutex_lock(&debug_mutex); \
  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
         fill_thread_id_string(pthread_self(), thread_id_string), buf); \
  fflush(stdout); \
  pthread_mutex_unlock(&debug_mutex);

#else
#error "unsupported thread type"
#endif
315 static rb_nativethread_lock_t debug_mutex;
319 #ifdef HAVE_VA_ARGS_MACRO
320 const char *file,
int line,
322 const char *fmt, ...)
326 #ifdef NON_SCALAR_THREAD_ID 330 if (!rb_thread_debug_enabled)
return;
332 if (debug_mutex_initialized == 1) {
333 debug_mutex_initialized = 0;
334 native_mutex_initialize(&debug_mutex);
358 native_mutex_initialize(lock);
364 native_mutex_destroy(lock);
370 native_mutex_lock(lock);
376 native_mutex_unlock(lock);
/* in unblock_function_set: */
    /* ... */
    if (fail_if_interrupted) {
        /* ... */
    }
    /* ... */

static void
rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
{
    /* ... */
}

void
rb_threadptr_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 0);
}

void
rb_threadptr_trap_interrupt(rb_thread_t *th)
{
    rb_threadptr_interrupt_common(th, 1);
}
static int
terminate_i(rb_thread_t *th, void *main_thread)
{
    if (th != main_thread) {
        /* ... */
    }
    else {
        thread_debug("terminate_i: main thread (%p)\n", (void *)th);
        /* ... */
    }
    /* ... */
}
void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        /* ... */
        err = rb_mutex_unlock_th(mutex, th);
        if (err) rb_bug("invalid keeping_mutexes: %s", err);
        /* ... */
    }
}
void
rb_thread_terminate_all(void)
{
    rb_thread_t *th = GET_THREAD(); /* main thread */
    rb_vm_t *vm = th->vm;
    volatile int sleeping = 0;

    if (vm->main_thread != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)vm->main_thread, (void *)th);
    }
    /* ... */
    thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
    terminate_all(vm, th);

    while (vm_living_thread_num(vm) > 1) {
        /* ... */
    }
    /* ... */
}
static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    /* ... */
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    thread_cleanup_func_before_exec(th_ptr);
    /* ... */
    native_thread_destroy(th);
    /* ... */
}

void
ruby_thread_init_stack(rb_thread_t *th)
{
    native_thread_init_stack(th);
}

/* in thread_do_start: */
    /* ... */
    const VALUE *ep = vm_proc_ep(proc);
    /* ... */
    native_set_thread_name(th);
    /* ... */
static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
{
    /* ... */
# ifdef USE_SIGALTSTACK
    rb_register_sigaltstack(th);
# endif

    if (th == th->vm->main_thread)
        rb_bug("thread_start_func_2 must not be used for main thread");

    ruby_thread_set_native(th);
    /* ... */
    th->ec.machine.register_stack_start = register_stack_start;
    /* ... */
    gvl_acquire(th->vm, th);
    /* ... */
    thread_debug("thread start (get lock): %p\n", (void *)th);
    /* ... */
        errinfo = th->ec.errinfo;
        /* ... */
        if (th->report_on_exception) {
            VALUE mesg = rb_thread_to_s(th->self);
            /* ... */
        }
        if (th->vm->thread_abort_on_exception ||
            /* ... */) {
            /* ... */
            rb_threadptr_raise(main_th, 1, &errinfo);
            /* ... */
        }
    /* ... */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }
    /* ... */
    rb_vm_living_threads_remove(th->vm, th);
    /* ... */
    join_list = th->join_list;
    while (join_list) {
        /* ... */
        join_list = join_list->next;
    }
    /* ... */
    rb_check_deadlock(th->vm);
    /* ... */
    th->ec.vm_stack = NULL;
    /* ... */
    native_mutex_lock(&th->vm->thread_destruct_lock);
    /* ... */
    th->vm->running_thread = NULL;
    native_mutex_unlock(&th->vm->thread_destruct_lock);
    thread_cleanup_func(th, FALSE);
    /* ... */
}
720 "can't start a new thread (frozen ThreadGroup)");
728 th->
priority = current_th->priority;
729 th->
thgroup = current_th->thgroup;
742 err = native_thread_create(th);
747 rb_vm_living_threads_insert(th->
vm, th);
751 #define threadptr_initialized(th) ((th)->first_args != 0) 783 th = rb_thread_ptr(thread);
/* in thread_initialize: */
    /* ... */
    return thread_create_core(thread, args, 0);

#define DELAY_INFTY 1E30

static VALUE
remove_from_join_list(VALUE arg)
{
    /* ... */
    if ((*p)->th == th) {
        /* ... */
    }
    /* ... */
}

static VALUE
thread_join_sleep(VALUE arg)
{
    /* ... */
    const double limit = forever ? 0 : timeofday() + p->delay;

    /* ... */
        double now = timeofday();
        /* ... */
        sleep_wait_for_interrupt(th, limit - now, 0);
    /* ... */
}

static VALUE
thread_join(rb_thread_t *target_th, double delay)
{
    /* ... */
    if (th == target_th) {
        /* ... */
    }
    if (GET_VM()->main_thread == target_th) {
        /* ... */
    }
    /* ... */
    if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
                   remove_from_join_list, (VALUE)&arg)) {
        /* ... */
    }
    /* ... */
            rb_bug("thread_join: Fixnum (%d) should not reach here.",
                   FIX2INT(err));
        /* ... */
            rb_bug("thread_join: THROW_DATA should not reach here.");
    /* ... */
    return target_th->self;
}
static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    double delay = DELAY_INFTY;
    /* ... */
    return thread_join(rb_thread_ptr(self), delay);
}
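/*
 * Editorial note: Thread#join with no argument reaches thread_join() with
 * delay = DELAY_INFTY (1E30 seconds, effectively forever), while
 * th.join(0.5) passes 0.5, so thread_join_sleep() gives up once timeofday()
 * passes its precomputed limit and Thread#join returns nil.
 */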
static VALUE
thread_value(VALUE self)
{
    /* ... */
}
#if SIGNEDNESS_OF_TIME_T < 0    /* signed */
# define TIMEVAL_SEC_MAX SIGNED_INTEGER_MAX(TYPEOF_TIMEVAL_TV_SEC)
# define TIMEVAL_SEC_MIN SIGNED_INTEGER_MIN(TYPEOF_TIMEVAL_TV_SEC)
#elif SIGNEDNESS_OF_TIME_T > 0  /* unsigned */
# define TIMEVAL_SEC_MAX ((TYPEOF_TIMEVAL_TV_SEC)(~(unsigned_time_t)0))
# define TIMEVAL_SEC_MIN ((TYPEOF_TIMEVAL_TV_SEC)0)
#endif

static struct timeval
double2timeval(double d)
{
    /* assume timeval.tv_sec has same signedness as time_t */
    const double TIMEVAL_SEC_MAX_PLUS_ONE = (2*(double)(TIMEVAL_SEC_MAX/2+1));
    struct timeval time;

    if (TIMEVAL_SEC_MAX_PLUS_ONE <= d) {
        time.tv_sec = TIMEVAL_SEC_MAX;
        time.tv_usec = 999999;
    }
    else if (d <= TIMEVAL_SEC_MIN) {
        time.tv_sec = TIMEVAL_SEC_MIN;
        time.tv_usec = 0;
    }
    else {
        time.tv_sec = (TYPEOF_TIMEVAL_TV_SEC)d;
        time.tv_usec = (int)((d - (time_t)d) * 1e6);
    }
    return time;
}
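/*
 * Editorial example: double2timeval(1.25) yields { tv_sec = 1,
 * tv_usec = 250000 }; out-of-range inputs are clamped to TIMEVAL_SEC_MAX /
 * TIMEVAL_SEC_MIN instead of overflowing tv_sec. TIMEVAL_SEC_MAX_PLUS_ONE
 * is written as 2*(double)(TIMEVAL_SEC_MAX/2+1) because TIMEVAL_SEC_MAX + 1
 * could overflow the integer type before the conversion to double.
 */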
static void
sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;

    th->status = status;
    /* ... */
    while (th->status == status) {
        if (deadlockable) {
            /* ... */
            rb_check_deadlock(th->vm);
            /* ... */
        }
        native_sleep(th, 0);
        /* ... */
        if (!spurious_check)
            break;
    }
    th->status = prev_status;
}
static void
getclockofday(struct timeval *tp)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    /* ... */
#endif
    /* ... */
}

static void
sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
{
    /* ... */
        to.tv_sec = TIMEVAL_SEC_MAX;
    /* ... */
        if (to.tv_sec == TIMEVAL_SEC_MAX)
            /* ... */
        native_sleep(th, &tv);
        getclockofday(&tvn);
        /* ... */
        if (!spurious_check)
            break;
    /* ... */
    th->status = prev_status;
}
static void
rb_thread_sleep_deadly_allow_spurious_wakeup(void)
{
    thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
    sleep_forever(GET_THREAD(), TRUE, TRUE);
}

static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    /* ... */
#endif
    /* ... */
}

static void
sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
{
    sleep_timeval(th, double2timeval(sleepsec), spurious_check);
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_timeval(th, time, 1);
}
static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    /* ... */
        gvl_yield(th->vm, th);
    /* ... */
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    /* ... */
}
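/*
 * Editorial note: rb_thread_schedule_limits(0) requests an immediate GVL
 * hand-off via gvl_yield(). The timer-interrupt path further below reuses
 * the same helper with limits_us = TIME_QUANTUM_USEC, which is what
 * enforces the regular time slice between Ruby threads.
 */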
static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
    /* ... */
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        /* ... */
        thread_debug("enter blocking region (%p)\n", (void *)th);
        /* ... */
        gvl_release(th->vm);
        return TRUE;
    }
    /* ... */
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    gvl_acquire(th->vm, th);
    /* ... */
    thread_debug("leave blocking region (%p)\n", (void *)th);
    unregister_ubf_list(th);
    th->blocking_region_buffer = 0;
    unblock_function_clear(th);
    /* ... */
}
static void *
call_without_gvl(void *(*func)(void *), void *data1,
                 rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
{
    void *val = 0;
    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    /* ... */
    BLOCKING_REGION({
        val = func(data1);
        saved_errno = errno;
    }, ubf, data2, fail_if_interrupted);

    if (!fail_if_interrupted) {
        RUBY_VM_CHECK_INTS_BLOCKING(th);
    }

    errno = saved_errno;

    return val;
}

void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return call_without_gvl(func, data1, ubf, data2, TRUE);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return call_without_gvl(func, data1, ubf, data2, FALSE);
}
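/*
 * Usage sketch (editorial, not part of thread.c): a C extension releases
 * the GVL around long-running work that makes no Ruby API calls.
 * heavy_work() and cancel_work() are hypothetical.
 */
#if 0
static void *heavy_work(void *p)  { /* long computation, no Ruby calls */ return p; }
static void  cancel_work(void *p) { /* unblock function: abort heavy_work early */ }

static VALUE
ext_compute(VALUE self)
{
    void *ret = rb_thread_call_without_gvl(heavy_work, NULL, cancel_work, NULL);
    return ret ? Qtrue : Qnil;
}
#endif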
VALUE
rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
{
    volatile int saved_errno = 0;
    /* ... */
        saved_errno = errno;
    /* ... */
    errno = saved_errno;
    /* ... */
}
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    /* ... */
    if (!ruby_native_thread_p()) {
        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }
    /* ... */
    if (!brb) {
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* re-enter the Ruby world with the GVL held, call func, then leave again */
    r = (*func)(data1);
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    /* ... */
}
static VALUE
thread_s_pass(VALUE klass)
{
    rb_thread_schedule();
    return Qnil;
}
static void
threadptr_check_pending_interrupt_queue(rb_thread_t *th)
{
    /* ... */
}

/* in rb_threadptr_pending_interrupt_check_mask: */
    /* ... */
    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];
        /* ... */
        klass = RBASIC(mod)->klass;
        /* ... */
        if (sym == sym_immediate) {
            /* ... */
        }
        else if (sym == sym_on_blocking) {
            /* ... */
        }
        else if (sym == sym_never) {
            /* ... */
        }
        /* ... */
    }
    /* ... */

static int
rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
{
    /* ... */
}

/* in rb_threadptr_pending_interrupt_deque: */
    /* ... */
    switch (mask_timing) {
        /* ... */
    }
    /* ... */

int
rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
{
    /* ... */
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        /* ... */
    }
    /* ... */
}

/* validating a Thread.handle_interrupt mask entry: */
    /* ... */
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        /* ... */
    }
    /* ... */
    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
        /* ... */
    }
    /* ... */
static VALUE
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{
    /* ... */
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        /* ... */
    }
    /* ... */
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        /* ... */
    }
    /* ... */
}
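/*
 * Ruby-level behavior (documented semantics of Thread.handle_interrupt,
 * shown for orientation; not part of this C file):
 *
 *   Thread.handle_interrupt(RuntimeError => :never) {
 *     critical_section            # Thread#raise is deferred in here
 *     Thread.handle_interrupt(RuntimeError => :immediate) {
 *       # any deferred RuntimeError fires here
 *     }
 *   }
 *
 * The :immediate / :on_blocking / :never timings correspond to
 * sym_immediate, sym_on_blocking and sym_never checked above, and the
 * pending-interrupt-empty tests around the block are what let deferred
 * interrupts fire at the mask boundaries.
 */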
static VALUE
rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
{
    rb_thread_t *target_th = rb_thread_ptr(target_thread);

    /* ... */
    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
        /* ... */
    }
    /* ... */
        if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
            /* ... */
        }
    /* ... */
}

static VALUE
rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
{
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
}

static rb_atomic_t
threadptr_get_interrupts(rb_thread_t *th)
{
    /* ... */
    do {
        /* ... */
    } while (old != interrupt);
    /* ... */
}
void
rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
{
    rb_atomic_t interrupt;
    int postponed_job_interrupt = 0;

    /* ... */
    while ((interrupt = threadptr_get_interrupts(th)) != 0) {
        int timer_interrupt;
        int pending_interrupt;
        /* ... */
        if (postponed_job_interrupt) {
            rb_postponed_job_flush(th->vm);
        }
        /* ... */
            rb_threadptr_to_kill(th);
        /* ... */
        if (timer_interrupt) {
            uint32_t limits_us = TIME_QUANTUM_USEC;
            /* ... */
            rb_thread_schedule_limits(limits_us);
        }
    }
    /* ... */
}
/* raising into another thread: bail out when the target is already dead */
    /* ... */
    if (rb_threadptr_dead(th)) {
        /* ... */
    }
    /* ... */
    if (rb_threadptr_dead(th)) {
        /* ... */
    }
    /* ... */

#if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
#define USE_SIGALTSTACK
#endif

/* in rb_notify_fd_close: */
    /* ... */
    if (wfd->fd == fd) {
        /* ... */
    }
    /* ... */
static VALUE
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    rb_thread_t *target_th = rb_thread_ptr(self);
    const rb_thread_t *current_th = GET_THREAD();

    threadptr_check_pending_interrupt_queue(target_th);
    rb_threadptr_raise(target_th, argc, argv);

    /* To perform Thread.current.raise as Kernel.raise */
    if (current_th == target_th) {
        RUBY_VM_CHECK_INTS(target_th);
    }
    return Qnil;
}

VALUE
rb_thread_kill(VALUE thread)
{
    /* ... */
        rb_threadptr_to_kill(th);
    /* ... */
        threadptr_check_pending_interrupt_queue(th);
    /* ... */
}
static VALUE
rb_thread_exit(void)
{
    rb_thread_t *th = GET_THREAD();
    return rb_thread_kill(th->self);
}

/* in rb_thread_wakeup_alive: */
    /* ... */
    rb_threadptr_ready(target_th);
    /* ... */

/* in rb_thread_stop: */
    /* ... */
        rb_raise(rb_eThreadError,
                 "stopping only thread\n\tnote: use sleep to stop forever");
    /* ... */
static VALUE
thread_s_current(VALUE klass)
{
    /* ... */
}

static VALUE
rb_thread_s_main(VALUE klass)
{
    /* ... */
}

static VALUE
rb_thread_s_abort_exc(void)
{
    /* ... */
}

static VALUE
rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    /* ... */
}

static VALUE
rb_thread_abort_exc(VALUE thread)
{
    return rb_thread_ptr(thread)->abort_on_exception ? Qtrue : Qfalse;
}

static VALUE
rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
    return val;
}

static VALUE
rb_thread_s_report_exc(void)
{
    /* ... */
}

static VALUE
rb_thread_s_report_exc_set(VALUE self, VALUE val)
{
    /* ... */
}

static VALUE
rb_thread_report_exc(VALUE thread)
{
    return rb_thread_ptr(thread)->report_on_exception ? Qtrue : Qfalse;
}

static VALUE
rb_thread_report_exc_set(VALUE thread, VALUE val)
{
    rb_thread_ptr(thread)->report_on_exception = RTEST(val);
    return val;
}
VALUE
rb_thread_group(VALUE thread)
{
    VALUE group = rb_thread_ptr(thread)->thgroup;
    return group == 0 ? Qnil : group;
}

static const char *
thread_status_name(rb_thread_t *th, int detail)
{
    switch (th->status) {
      case THREAD_RUNNABLE:
        return th->to_kill ? "aborting" : "run";
      case THREAD_STOPPED_FOREVER:
        if (detail) return "sleep_forever";
        /* ... */
    }
    /* ... */
}
/* in rb_thread_status: */
    /* ... */
    if (rb_threadptr_dead(target_th)) {
        /* ... */
    }
    /* ... */

static VALUE
rb_thread_alive_p(VALUE thread)
{
    if (rb_threadptr_dead(rb_thread_ptr(thread))) {
        return Qfalse;
    }
    /* ... */
}

static VALUE
rb_thread_stop_p(VALUE thread)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (rb_threadptr_dead(th)) {
        return Qtrue;
    }
    /* ... */
}

static VALUE
rb_thread_safe_level(VALUE thread)
{
    return INT2NUM(rb_thread_ptr(thread)->ec.safe_level);
}
static VALUE
rb_thread_getname(VALUE thread)
{
    return rb_thread_ptr(thread)->name;
}

/* in rb_thread_setname: */
    /* ... */
        native_set_another_thread_name(target_th->thread_id, name);
    /* ... */

static VALUE
rb_thread_to_s(VALUE thread)
{
    /* ... */
    status = thread_status_name(target_th, TRUE);
    /* ... */
}
static ID recursive_key;

static VALUE
threadptr_local_aref(rb_thread_t *th, ID id)
{
    if (id == recursive_key) {
        return th->ec.local_storage_recursive_hash;
    }
    else {
        VALUE val;
        st_table *local_storage = th->ec.local_storage;

        if (local_storage != NULL && st_lookup(local_storage, id, &val)) {
            return val;
        }
        /* ... */
    }
}

VALUE
rb_thread_local_aref(VALUE thread, ID id)
{
    return threadptr_local_aref(rb_thread_ptr(thread), id);
}

/* in rb_thread_aref: */
    /* ... */
    if (!id) return Qnil;
    /* ... */
static VALUE
rb_thread_fetch(int argc, VALUE *argv, VALUE self)
{
    /* ... */
    if (block_given && argc == 2) {
        rb_warn("block supersedes default value argument");
    }
    /* ... */
    if (id == recursive_key) {
        /* ... */
    }
    /* ... */
    else if (block_given) {
        /* ... */
    }
    else if (argc == 1) {
        /* ... */
    }
    /* ... */
}

static VALUE
threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
{
    if (id == recursive_key) {
        /* ... */
    }
    /* ... */
        if (!local_storage) return Qnil;
        st_delete_wrap(local_storage, id);
        /* ... */
    /* ... */
        if (local_storage == NULL) {
            /* ... */
        }
    /* ... */
}

VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
{
    return threadptr_local_aset(rb_thread_ptr(thread), id, val);
}
static VALUE
rb_thread_variable_get(VALUE thread, VALUE key)
{
    /* ... */
}

/* in rb_thread_key_p: */
    st_table *local_storage = rb_thread_ptr(self)->ec.local_storage;

    if (!id || local_storage == NULL) {
        /* ... */
    }
    else if (st_lookup(local_storage, id, 0)) {
        /* ... */
    }
    /* ... */

int
rb_thread_alone(void)
{
    return vm_living_thread_num(GET_VM()) == 1;
}
static VALUE
rb_thread_keys(VALUE self)
{
    st_table *local_storage = rb_thread_ptr(self)->ec.local_storage;
    VALUE ary = rb_ary_new();

    if (local_storage) {
        st_foreach(local_storage, thread_keys_i, ary);
    }
    return ary;
}

static VALUE
rb_thread_variables(VALUE thread)
{
    /* ... */
}

static VALUE
rb_thread_variable_p(VALUE thread, VALUE key)
{
    /* ... */
    if (!RHASH(locals)->ntbl)
        return Qfalse;
    /* ... */
}
static VALUE
rb_thread_priority(VALUE thread)
{
    return INT2NUM(rb_thread_ptr(thread)->priority);
}

static VALUE
rb_thread_priority_set(VALUE thread, VALUE prio)
{
    /* ... */
#if USE_NATIVE_THREAD_PRIORITY
    /* ... */
    native_thread_apply_priority(th);
#else
    /* clamp to RUBY_THREAD_PRIORITY_MIN..RUBY_THREAD_PRIORITY_MAX */
    /* ... */
#endif
    /* ... */
}
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)

void
rb_fd_init(rb_fdset_t *fds)
{
    fds->maxfd = 0;
    fds->fdset = ALLOC(fd_set);
    FD_ZERO(fds->fdset);
}

void
rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    dst->fdset = xmalloc(size);
    memcpy(dst->fdset, src->fdset, size);
}

void
rb_fd_term(rb_fdset_t *fds)
{
    if (fds->fdset) xfree(fds->fdset);
    fds->maxfd = 0;
    fds->fdset = 0;
}

void
rb_fd_zero(rb_fdset_t *fds)
{
    if (fds->fdset)
        MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
}

static void
rb_fd_resize(int n, rb_fdset_t *fds)
{
    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    if (m > o) {
        fds->fdset = xrealloc(fds->fdset, m);
        memset((char *)fds->fdset + o, 0, m - o);
    }
    if (n >= fds->maxfd) fds->maxfd = n + 1;
}

void
rb_fd_set(int n, rb_fdset_t *fds)
{
    rb_fd_resize(n, fds);
    FD_SET(n, fds->fdset);
}

void
rb_fd_clr(int n, rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);
}

int
rb_fd_isset(int n, const rb_fdset_t *fds)
{
    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0;
}

void
rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
{
    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);
    dst->maxfd = max;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src, size);
}

void
rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
{
    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);
    dst->maxfd = src->maxfd;
    dst->fdset = xrealloc(dst->fdset, size);
    memcpy(dst->fdset, src->fdset, size);
}
#ifdef __native_client__
int select(int nfds, fd_set *readfds, fd_set *writefds,
           fd_set *exceptfds, struct timeval *timeout);
#endif

/* in rb_fd_select: */
    /* ... */
    return select(n, r, w, e, timeout);

#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#elif defined(_WIN32)

void
rb_fd_init(rb_fdset_t *set)
{
    set->capa = FD_SETSIZE;
    set->fdset = ALLOC(fd_set);
    FD_ZERO(set->fdset);
}

/* ... */

void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset =
            xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)

#endif

#ifndef rb_fd_no_init
#define rb_fd_no_init(fds) (void)(fds)
#endif

static inline int
retryable(int e)
{
    if (e == EINTR) return TRUE;
#ifdef ERESTART
    if (e == ERESTART) return TRUE;
#endif
    return FALSE;
}

#define restore_fdset(fds1, fds2) \
    ((fds1) ? rb_fd_dup(fds1, fds2) : (void)0)

static inline void
update_timeval(struct timeval *timeout, double limit)
{
    if (timeout) {
        double d = limit - timeofday();

        timeout->tv_sec = (time_t)d;
        /* ... */
    }
}
3722 limit += (double)timeout->
tv_sec+(
double)timeout->
tv_usec*1e-6;
3723 wait_rest = *timeout;
3724 timeout = &wait_rest;
3727 #define fd_init_copy(f) \ 3728 (f##fds) ? rb_fd_init_copy(&orig_##f, f##fds) : rb_fd_no_init(&orig_##f) 3738 result = native_fd_select(n, readfds, writefds, exceptfds,
3740 if (result < 0) lerrno =
errno;
3746 #define fd_term(f) if (f##fds) rb_fd_term(&orig_##f) 3756 rb_thread_wait_fd_rw(
int fd,
int read)
3761 thread_debug(
"rb_thread_wait_fd_rw(%d, %s)\n", fd, read ?
"read" :
"write");
3772 thread_debug(
"rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ?
"read" :
"write");
3778 rb_thread_wait_fd_rw(fd, 1);
3784 rb_thread_wait_fd_rw(fd, 0);
int
rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except,
                    struct timeval *timeout)
{
    if (!read && !write && !except) {
        /* ... */
    }
    /* ... */
    return do_select(max, read, write, except, timeout);
}
#if defined(HAVE_POLL) && defined(__linux__)
/* ... */
#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

#ifndef HAVE_PPOLL
/* TODO: don't ignore sigmask */
static int
ppoll(struct pollfd *fds, nfds_t nfds,
      const struct timespec *ts, const sigset_t *sigmask)
{
    int timeout_ms;

    if (ts) {
        int tmp, tmp2;

        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = -1;
        else {
            tmp = (int)(ts->tv_sec * 1000);
            tmp2 = (int)(ts->tv_nsec / (1000 * 1000));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = -1;
            else
                timeout_ms = (int)(tmp + tmp2);
        }
    }
    else
        timeout_ms = -1;

    return poll(fds, nfds, timeout_ms);
}
#endif
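/*
 * Editorial example: for ts = { tv_sec = 2, tv_nsec = 500000000 } the
 * emulation above computes timeout_ms = 2*1000 + 500000000/(1000*1000)
 * = 2500. Both steps are overflow-checked; a timespec too large for an int
 * number of milliseconds falls back to -1, i.e. an infinite poll() timeout.
 */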
static inline void
update_timespec(struct timespec *timeout, double limit)
{
    if (timeout) {
        double d = limit - timeofday();

        timeout->tv_sec = (long)d;
        /* ... */
    }
}

int
rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
{
    struct pollfd fds;
    int result = 0, lerrno;
    /* ... */

#define poll_update() \
    (update_timespec(timeout, limit), \
     TRUE)

    if (tv) {
        limit = timeofday();
        /* ... */
    }

    fds.fd = fd;
    fds.events = (short)events;

    do {
        /* ... */
            result = ppoll(&fds, 1, timeout, NULL);
            if (result < 0) lerrno = errno;
        /* ... */
    } while (result < 0 && retryable(errno = lerrno) && poll_update());
    if (result < 0) return -1;

    if (fds.revents & POLLNVAL) {
        errno = EBADF;
        return -1;
    }

    /* ... */
    if (fds.revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds.revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds.revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;
    return result;
}
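/*
 * Usage sketch (editorial): rb_wait_for_single_fd() is the single-fd
 * waiting primitive exposed to the rest of the VM and to extensions:
 *
 *   int revents = rb_wait_for_single_fd(fd, RB_WAITFD_IN, NULL);
 *   if (revents < 0) rb_sys_fail(0);
 *   if (revents & RB_WAITFD_IN) { ... the fd is readable ... }
 *
 * A non-NULL struct timeval * bounds the wait; NULL blocks indefinitely.
 */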
#else /* !(HAVE_POLL && __linux__): select(2)-based fallback */

static VALUE
select_single(VALUE ptr)
{
    /* ... */
}

static VALUE
select_single_cleanup(VALUE ptr)
{
    /* ... */
}

int
rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
{
    /* ... */
    r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
    /* ... */
}

#endif
#ifdef USE_CONSERVATIVE_STACK_END
void
rb_gc_set_stack_end(VALUE **stack_end_p)
{
    VALUE stack_end;
    *stack_end_p = &stack_end;
}
#endif

/* ... */

static void
timer_thread_function(void *arg)
{
    rb_vm_t *vm = GET_VM();  /* TODO: fix me for Multi-VM */
    /* ... */
    if (vm->prove_profile.enable) {
        /* ... */
    }
    /* ... */
    if (vm->during_gc) {
        /* ... */
    }
    /* ... */
}

void
rb_thread_stop_timer_thread(void)
{
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();
    }
}

void
rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();
}

void
rb_thread_start_timer_thread(void)
{
    /* ... */
    rb_thread_create_timer_thread();
}
#if defined(HAVE_WORKING_FORK)

/* clearing method-coverage triples: */
    /* ... */
    for (i = 2; i < RARRAY_LEN(methods); i += 3) {
        /* ... */
    }
    /* ... */

static void
clear_coverage(void)
{
    VALUE coverages = rb_get_coverages();
    if (RTEST(coverages)) {
        /* ... */
    }
}

/* in rb_thread_atfork_internal: */
    /* ... */
    rb_vm_living_threads_init(vm);
    rb_vm_living_threads_insert(vm, th);
    /* ... */

static void
terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
{
    if (th != current_th) {
        rb_mutex_abandon_keeping_mutexes(th);
        rb_mutex_abandon_locking_mutex(th);
        thread_cleanup_func(th, TRUE);
    }
}

void
rb_thread_atfork(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_thread_atfork_internal(th, terminate_atfork_i);
    /* ... */
}

static void
terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
{
    if (th != current_th) {
        thread_cleanup_func_before_exec(th);
    }
}

void
rb_thread_atfork_before_exec(void)
{
    rb_thread_t *th = GET_THREAD();
    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
}

#endif
static size_t
thgroup_memsize(const void *ptr)
{
    return sizeof(struct thgroup);
}

static VALUE
thgroup_s_alloc(VALUE klass)
{
    /* ... */
}

static VALUE
thgroup_enclose(VALUE group)
{
    /* ... */
}

static VALUE
thgroup_enclosed_p(VALUE group)
{
    /* ... */
}

/* in thgroup_add: */
    /* ... */
        rb_raise(rb_eThreadError, "can't move from the enclosed thread group");
    /* ... */
static void
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}

static const rb_data_type_t thread_shield_data_type = {
    "thread_shield",
    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
thread_shield_alloc(VALUE klass)
{
    /* ... */
}

#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)

static inline void
rb_thread_shield_waiting_inc(VALUE b)
{
    /* ... */
}

static inline void
rb_thread_shield_waiting_dec(VALUE b)
{
    /* ... */
}

VALUE
rb_thread_shield_new(void)
{
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    /* ... */
    return thread_shield;
}

/* in rb_thread_shield_wait: */
    /* ... */
    if (!mutex) return Qfalse;
    /* ... */
    rb_thread_shield_waiting_inc(self);
    /* ... */
    rb_thread_shield_waiting_dec(self);
    /* ... */

static VALUE
thread_shield_get_mutex(VALUE self)
{
    /* ... */
}

/* in rb_thread_shield_release: */
    VALUE mutex = thread_shield_get_mutex(self);
    /* ... */

/* in rb_thread_shield_destroy: */
    VALUE mutex = thread_shield_get_mutex(self);
    /* ... */
/* in threadptr_recursive_hash accessors: */
    /* ... */
    VALUE hash = threadptr_recursive_hash(th);
    /* ... */
    threadptr_recursive_hash_set(th, hash);
    /* ... */

#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

/* in recursive_check: */
    /* ... */
    if (paired_obj_id) {
        /* ... */
        VALUE other_paired_obj = pair_list;
        /* ... */
    }
    /* ... */

/* in recursive_pop: */
    /* ... */
    if (pair_list == Qundef) {
        /* ... */
    }
    /* ... */

static VALUE
exec_recursive(VALUE (*func)(VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    /* ... */
    p.list = recursive_list_access(sym);
    /* ... */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
        if (outer && !outermost) {
            /* ... */
        }
        /* ... */
    }
    else {
        /* ... */
        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            /* ... */
            if (!recursive_pop(p.list, p.objid, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
            /* ... */
            if (result == p.list) {
                /* ... */
            }
        }
        /* ... */
    }
    /* ... */
}

VALUE
rb_exec_recursive(VALUE (*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 0);
}

VALUE
rb_exec_recursive_paired(VALUE (*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
}

VALUE
rb_exec_recursive_outer(VALUE (*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 1);
}

VALUE
rb_exec_recursive_paired_outer(VALUE (*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
}
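/*
 * Usage sketch (editorial, not part of thread.c): rb_exec_recursive is the
 * recursion guard behind methods like Array#inspect. inspect_ary() below
 * is hypothetical:
 */
#if 0
static VALUE
inspect_ary(VALUE ary, VALUE dummy, int recur)
{
    if (recur) return rb_str_new_cstr("[...]");  /* ary is already being inspected */
    /* ... build the element-by-element representation ... */
    return rb_str_new_cstr("[.]");
}

VALUE
inspect_with_guard(VALUE ary)
{
    return rb_exec_recursive(inspect_ary, ary, 0);
}
#endif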
static VALUE
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace(argc, argv, thval);
}

static VALUE
rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace_locations(argc, argv, thval);
}
#define rb_intern(str) rb_intern_const(str)

void
Init_Thread(void)
{
    /* ... */
#if THREAD_DEBUG < 0
    rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
    rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
#endif
    /* ... */
    rb_vm_register_special_exception(/* ... */, rb_eIOError,
                                     "stream closed in another thread");
    /* ... */
    recursive_key = rb_intern("__recursive_key__");
    /* ... */
        gvl_acquire(th->vm, th);
    /* ... */
    rb_thread_create_timer_thread();
    /* ... */
    (void)native_mutex_trylock;
    /* ... */
}
static void
debug_deadlock_check(rb_vm_t *vm, VALUE msg)
{
    /* ... */
    rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
                /* ... */);
    /* ... */
            rb_str_catf(msg, /* ... */
                        mutex->th, rb_mutex_num_waiting(mutex));
        /* ... */
            rb_str_catf(msg, "\n depended by: tb_thread_id:%p", list->th);
    /* ... */
}
static void
rb_check_deadlock(rb_vm_t *vm)
{
    int found = 0;
    /* ... */

    if (vm_living_thread_num(vm) > vm->sleeper) return;
    if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
    if (patrol_thread && patrol_thread != GET_THREAD()) return;

    /* ... */
        if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
            found = 1;
        }
    /* ... */

    if (!found) {
        VALUE argv[2];
        argv[0] = rb_eFatal;
        argv[1] = rb_str_new2("No live threads left. Deadlock?");
        debug_deadlock_check(vm, argv[1]);
        /* ... */
    }
}
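/*
 * Editorial note: deadlock detection is a simple census. vm->sleeper counts
 * threads stopped forever; once it equals the number of living threads, the
 * walk above looks for any thread that could still make progress (a pending
 * interrupt, or a mutex that is held or contended). If none is found, the
 * main thread receives a fatal "No live threads left. Deadlock?" exception.
 */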
/* in the line-coverage update hook: */
    /* ... */
    long idx = arg / 16;
    /* ... */

/* in the branch-coverage update hook: */
    /* ... */
    long idx = arg / 16 * 3 + 2;
    /* ... */

VALUE
rb_get_coverages(void)
{
    return GET_VM()->coverages;
}

void
rb_set_coverages(VALUE coverages, int mode)
{
    GET_VM()->coverages = coverages;
    GET_VM()->coverage_mode = mode;
    /* ... */
}

/* in rb_default_coverage: */
    /* ... */
    int mode = GET_VM()->coverage_mode;
    /* ... */