Ruby 2.5.0dev (2017-10-22 revision 60238)
thread.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  thread.c -
4 
5  $Author$
6 
7  Copyright (C) 2004-2007 Koichi Sasada
8 
9 **********************************************************************/
10 
11 /*
12  YARV Thread Design
13 
14  model 1: Userlevel Thread
15  Same as traditional ruby thread.
16 
17  model 2: Native Thread with Global VM lock
18  Using pthread (or Windows thread) and Ruby threads run concurrent.
19 
20  model 3: Native Thread with fine grain lock
21  Using pthread and Ruby threads run concurrent or parallel.
22 
23  model 4: M:N User:Native threads with Global VM lock
24  Combination of model 1 and 2
25 
26  model 5: M:N User:Native thread with fine grain lock
27  Combination of model 1 and 3
28 
29 ------------------------------------------------------------------------
30 
31  model 2:
32  A thread has mutex (GVL: Global VM Lock or Giant VM Lock) can run.
33  When thread scheduling, running thread release GVL. If running thread
34  try blocking operation, this thread must release GVL and another
35  thread can continue this flow. After blocking operation, thread
36  must check interrupt (RUBY_VM_CHECK_INTS).
37 
38  Every VM can run parallel.
39 
40  Ruby threads are scheduled by OS thread scheduler.
41 
42 ------------------------------------------------------------------------
43 
44  model 3:
45  Every threads run concurrent or parallel and to access shared object
46  exclusive access control is needed. For example, to access String
47  object or Array object, fine grain lock must be locked every time.
48  */
49 
50 
51 /*
52  * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53  * 2.15 or later and set _FORTIFY_SOURCE > 0.
54  * However, the implementation is wrong. Even though Linux's select(2)
55  * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56  * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57  * it doesn't work correctly and makes program abort. Therefore we need to
58  * disable FORTIFY_SOURCE until glibc fixes it.
59  */
60 #undef _FORTIFY_SOURCE
61 #undef __USE_FORTIFY_LEVEL
62 #define __USE_FORTIFY_LEVEL 0
63 
64 /* for model 2 */
65 
66 #include "eval_intern.h"
67 #include "gc.h"
68 #include "timev.h"
69 #include "ruby/io.h"
70 #include "ruby/thread.h"
71 #include "ruby/thread_native.h"
72 #include "ruby/debug.h"
73 #include "internal.h"
74 
75 #ifndef USE_NATIVE_THREAD_PRIORITY
76 #define USE_NATIVE_THREAD_PRIORITY 0
77 #define RUBY_THREAD_PRIORITY_MAX 3
78 #define RUBY_THREAD_PRIORITY_MIN -3
79 #endif
80 
81 #ifndef THREAD_DEBUG
82 #define THREAD_DEBUG 0
83 #endif
84 
85 static VALUE rb_cThreadShield;
86 
87 static VALUE sym_immediate;
88 static VALUE sym_on_blocking;
89 static VALUE sym_never;
90 static ID id_locals;
91 
92 static void sleep_timeval(rb_thread_t *th, struct timeval time, int spurious_check);
93 static void sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check);
94 static void sleep_forever(rb_thread_t *th, int nodeadlock, int spurious_check);
95 static void rb_thread_sleep_deadly_allow_spurious_wakeup(void);
96 static double timeofday(void);
97 static int rb_threadptr_dead(rb_thread_t *th);
98 static void rb_check_deadlock(rb_vm_t *vm);
99 static int rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th);
100 
101 #define eKillSignal INT2FIX(0)
102 #define eTerminateSignal INT2FIX(1)
103 static volatile int system_working = 1;
104 
105 struct waiting_fd {
106  struct list_node wfd_node; /* <=> vm.waiting_fds */
108  int fd;
109 };
110 
111 inline static void
112 st_delete_wrap(st_table *table, st_data_t key)
113 {
114  st_delete(table, &key, 0);
115 }
116 
117 /********************************************************************************/
118 
119 #define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
120 
122  enum rb_thread_status prev_status;
123 };
124 
125 static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
126 static void unblock_function_clear(rb_thread_t *th);
127 
128 static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
129  rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
130 static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
131 
132 #ifdef __ia64
133 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th) \
134  do{(th)->machine.register_stack_end = rb_ia64_bsp();}while(0)
135 #else
136 #define RB_GC_SAVE_MACHINE_REGISTER_STACK(th)
137 #endif
138 #define RB_GC_SAVE_MACHINE_CONTEXT(th) \
139  do { \
140  FLUSH_REGISTER_WINDOWS; \
141  RB_GC_SAVE_MACHINE_REGISTER_STACK(th); \
142  setjmp((th)->ec.machine.regs); \
143  SET_MACHINE_STACK_END(&(th)->ec.machine.stack_end); \
144  } while (0)
145 
146 #define GVL_UNLOCK_BEGIN() do { \
147  rb_thread_t *_th_stored = GET_THREAD(); \
148  RB_GC_SAVE_MACHINE_CONTEXT(_th_stored); \
149  gvl_release(_th_stored->vm);
150 
151 #define GVL_UNLOCK_END() \
152  gvl_acquire(_th_stored->vm, _th_stored); \
153  rb_thread_set_current(_th_stored); \
154 } while(0)
155 
156 #ifdef __GNUC__
157 #ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
158 #define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
159 #else
160 #define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
161 #endif
162 #else
163 #define only_if_constant(expr, notconst) notconst
164 #endif
165 #define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted) do { \
166  rb_thread_t *__th = GET_THREAD(); \
167  struct rb_blocking_region_buffer __region; \
168  if (blocking_region_begin(__th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
169  /* always return true unless fail_if_interrupted */ \
170  !only_if_constant(fail_if_interrupted, TRUE)) { \
171  exec; \
172  blocking_region_end(__th, &__region); \
173  }; \
174 } while(0)
175 
176 #define RUBY_VM_CHECK_INTS_BLOCKING(th) vm_check_ints_blocking(th)
177 static inline void
178 vm_check_ints_blocking(rb_thread_t *th)
179 {
180  if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
181  if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(th))) return;
182  }
183  else {
185 
187  }
189 }
190 
191 static int
192 vm_living_thread_num(rb_vm_t *vm)
193 {
194  return (int)vm->living_thread_num;
195 }
196 
197 #if THREAD_DEBUG
198 #ifdef HAVE_VA_ARGS_MACRO
199 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
200 #define thread_debug(...) rb_thread_debug(__FILE__, __LINE__, __VA_ARGS__)
201 #define POSITION_FORMAT "%s:%d:"
202 #define POSITION_ARGS ,file, line
203 #else
204 void rb_thread_debug(const char *fmt, ...);
205 #define thread_debug rb_thread_debug
206 #define POSITION_FORMAT
207 #define POSITION_ARGS
208 #endif
209 
210 # ifdef NON_SCALAR_THREAD_ID
211 #define fill_thread_id_string ruby_fill_thread_id_string
212 const char *
213 ruby_fill_thread_id_string(rb_nativethread_id_t thid, rb_thread_id_string_t buf)
214 {
215  extern const char ruby_digitmap[];
216  size_t i;
217 
218  buf[0] = '0';
219  buf[1] = 'x';
220  for (i = 0; i < sizeof(thid); i++) {
221 # ifdef LITTLE_ENDIAN
222  size_t j = sizeof(thid) - i - 1;
223 # else
224  size_t j = i;
225 # endif
226  unsigned char c = (unsigned char)((char *)&thid)[j];
227  buf[2 + i * 2] = ruby_digitmap[(c >> 4) & 0xf];
228  buf[3 + i * 2] = ruby_digitmap[c & 0xf];
229  }
230  buf[sizeof(rb_thread_id_string_t)-1] = '\0';
231  return buf;
232 }
233 # define fill_thread_id_str(th) fill_thread_id_string((th)->thread_id, (th)->thread_id_string)
234 # define thread_id_str(th) ((th)->thread_id_string)
235 # define PRI_THREAD_ID "s"
236 # endif
237 
238 # if THREAD_DEBUG < 0
239 static int rb_thread_debug_enabled;
240 
241 /*
242  * call-seq:
243  * Thread.DEBUG -> num
244  *
245  * Returns the thread debug level. Available only if compiled with
246  * THREAD_DEBUG=-1.
247  */
248 
249 static VALUE
250 rb_thread_s_debug(void)
251 {
252  return INT2NUM(rb_thread_debug_enabled);
253 }
254 
255 /*
256  * call-seq:
257  * Thread.DEBUG = num
258  *
259  * Sets the thread debug level. Available only if compiled with
260  * THREAD_DEBUG=-1.
261  */
262 
263 static VALUE
264 rb_thread_s_debug_set(VALUE self, VALUE val)
265 {
266  rb_thread_debug_enabled = RTEST(val) ? NUM2INT(val) : 0;
267  return val;
268 }
269 # else
270 # define rb_thread_debug_enabled THREAD_DEBUG
271 # endif
272 #else
273 #define thread_debug if(0)printf
274 #endif
275 
276 #ifndef fill_thread_id_str
277 # define fill_thread_id_string(thid, buf) (void *)(thid)
278 # define fill_thread_id_str(th) (void)0
279 # define thread_id_str(th) ((void *)(th)->thread_id)
280 # define PRI_THREAD_ID "p"
281 #endif
282 
283 #ifndef __ia64
284 #define thread_start_func_2(th, st, rst) thread_start_func_2(th, st)
285 #endif
286 NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start,
287  VALUE *register_stack_start));
288 static void timer_thread_function(void *);
289 
290 #if defined(_WIN32)
291 #include "thread_win32.c"
292 
293 #define DEBUG_OUT() \
294  WaitForSingleObject(&debug_mutex, INFINITE); \
295  printf(POSITION_FORMAT"%#lx - %s" POSITION_ARGS, GetCurrentThreadId(), buf); \
296  fflush(stdout); \
297  ReleaseMutex(&debug_mutex);
298 
299 #elif defined(HAVE_PTHREAD_H)
300 #include "thread_pthread.c"
301 
302 #define DEBUG_OUT() \
303  pthread_mutex_lock(&debug_mutex); \
304  printf(POSITION_FORMAT"%"PRI_THREAD_ID" - %s" POSITION_ARGS, \
305  fill_thread_id_string(pthread_self(), thread_id_string), buf); \
306  fflush(stdout); \
307  pthread_mutex_unlock(&debug_mutex);
308 
309 #else
310 #error "unsupported thread type"
311 #endif
312 
313 #if THREAD_DEBUG
314 static int debug_mutex_initialized = 1;
315 static rb_nativethread_lock_t debug_mutex;
316 
317 void
318 rb_thread_debug(
319 #ifdef HAVE_VA_ARGS_MACRO
320  const char *file, int line,
321 #endif
322  const char *fmt, ...)
323 {
324  va_list args;
325  char buf[BUFSIZ];
326 #ifdef NON_SCALAR_THREAD_ID
327  rb_thread_id_string_t thread_id_string;
328 #endif
329 
330  if (!rb_thread_debug_enabled) return;
331 
332  if (debug_mutex_initialized == 1) {
333  debug_mutex_initialized = 0;
334  native_mutex_initialize(&debug_mutex);
335  }
336 
337  va_start(args, fmt);
338  vsnprintf(buf, BUFSIZ, fmt, args);
339  va_end(args);
340 
341  DEBUG_OUT();
342 }
343 #endif
344 
345 #include "thread_sync.c"
346 
347 void
349 {
350  gvl_release(vm);
351  gvl_destroy(vm);
352  native_mutex_destroy(&vm->thread_destruct_lock);
353 }
354 
355 void
356 rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
357 {
358  native_mutex_initialize(lock);
359 }
360 
361 void
362 rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
363 {
364  native_mutex_destroy(lock);
365 }
366 
367 void
368 rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
369 {
370  native_mutex_lock(lock);
371 }
372 
373 void
374 rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
375 {
376  native_mutex_unlock(lock);
377 }
378 
379 static int
380 unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
381 {
382  do {
383  if (fail_if_interrupted) {
384  if (RUBY_VM_INTERRUPTED_ANY(th)) {
385  return FALSE;
386  }
387  }
388  else {
389  RUBY_VM_CHECK_INTS(th);
390  }
391 
392  native_mutex_lock(&th->interrupt_lock);
393  } while (RUBY_VM_INTERRUPTED_ANY(th) &&
394  (native_mutex_unlock(&th->interrupt_lock), TRUE));
395 
396  VM_ASSERT(th->unblock.func == NULL);
397 
398  th->unblock.func = func;
399  th->unblock.arg = arg;
400  native_mutex_unlock(&th->interrupt_lock);
401 
402  return TRUE;
403 }
404 
405 static void
406 unblock_function_clear(rb_thread_t *th)
407 {
408  native_mutex_lock(&th->interrupt_lock);
409  th->unblock.func = NULL;
410  native_mutex_unlock(&th->interrupt_lock);
411 }
412 
413 static void
414 rb_threadptr_interrupt_common(rb_thread_t *th, int trap)
415 {
416  native_mutex_lock(&th->interrupt_lock);
417  if (trap) {
419  }
420  else {
422  }
423  if (th->unblock.func != NULL) {
424  (th->unblock.func)(th->unblock.arg);
425  }
426  else {
427  /* none */
428  }
429  native_mutex_unlock(&th->interrupt_lock);
430 }
431 
432 void
434 {
435  rb_threadptr_interrupt_common(th, 0);
436 }
437 
438 void
440 {
441  rb_threadptr_interrupt_common(th, 1);
442 }
443 
444 static void
445 terminate_all(rb_vm_t *vm, const rb_thread_t *main_thread)
446 {
447  rb_thread_t *th = 0;
448 
449  list_for_each(&vm->living_threads, th, vmlt_node) {
450  if (th != main_thread) {
451  thread_debug("terminate_i: %p\n", (void *)th);
454  }
455  else {
456  thread_debug("terminate_i: main thread (%p)\n", (void *)th);
457  }
458  }
459 }
460 
461 void
463 {
464  const char *err;
465  rb_mutex_t *mutex;
466  rb_mutex_t *mutexes = th->keeping_mutexes;
467 
468  while (mutexes) {
469  mutex = mutexes;
470  /* rb_warn("mutex #<%p> remains to be locked by terminated thread",
471  mutexes); */
472  mutexes = mutex->next_mutex;
473  err = rb_mutex_unlock_th(mutex, th);
474  if (err) rb_bug("invalid keeping_mutexes: %s", err);
475  }
476 }
477 
478 void
480 {
481  rb_thread_t *volatile th = GET_THREAD(); /* main thread */
482  rb_vm_t *volatile vm = th->vm;
483  volatile int sleeping = 0;
484 
485  if (vm->main_thread != th) {
486  rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
487  (void *)vm->main_thread, (void *)th);
488  }
489 
490  /* unlock all locking mutexes */
492 
493  TH_PUSH_TAG(th);
494  if (TH_EXEC_TAG() == TAG_NONE) {
495  retry:
496  thread_debug("rb_thread_terminate_all (main thread: %p)\n", (void *)th);
497  terminate_all(vm, th);
498 
499  while (vm_living_thread_num(vm) > 1) {
500  /*
501  * Thread exiting routine in thread_start_func_2 notify
502  * me when the last sub-thread exit.
503  */
504  sleeping = 1;
505  native_sleep(th, 0);
507  sleeping = 0;
508  }
509  }
510  else {
511  /*
512  * When caught an exception (e.g. Ctrl+C), let's broadcast
513  * kill request again to ensure killing all threads even
514  * if they are blocked on sleep, mutex, etc.
515  */
516  if (sleeping) {
517  sleeping = 0;
518  goto retry;
519  }
520  }
521  TH_POP_TAG();
522 }
523 
524 static void
525 thread_cleanup_func_before_exec(void *th_ptr)
526 {
527  rb_thread_t *th = th_ptr;
528  th->status = THREAD_KILLED;
530 #ifdef __ia64
531  th->ec.machine.register_stack_start = th->ec.machine.register_stack_end = NULL;
532 #endif
533 }
534 
535 static void
536 thread_cleanup_func(void *th_ptr, int atfork)
537 {
538  rb_thread_t *th = th_ptr;
539 
540  th->locking_mutex = Qfalse;
541  thread_cleanup_func_before_exec(th_ptr);
542 
543  /*
544  * Unfortunately, we can't release native threading resource at fork
545  * because libc may have unstable locking state therefore touching
546  * a threading resource may cause a deadlock.
547  */
548  if (atfork)
549  return;
550 
551  native_mutex_destroy(&th->interrupt_lock);
552  native_thread_destroy(th);
553 }
554 
555 static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
556 static VALUE rb_thread_to_s(VALUE thread);
557 
558 void
560 {
561  native_thread_init_stack(th);
562 }
563 
564 const VALUE *
566 {
567  const VALUE *ep = vm_proc_ep(proc);
568 
569  if (ep) {
570  return rb_vm_ep_local_ep(ep);
571  }
572  else {
573  return NULL;
574  }
575 }
576 
577 static void
578 thread_do_start(rb_thread_t *th, VALUE args)
579 {
580  native_set_thread_name(th);
581  if (!th->first_func) {
582  rb_proc_t *proc;
583  GetProcPtr(th->first_proc, proc);
584  th->ec.errinfo = Qnil;
586  th->ec.root_svar = Qfalse;
588  th->value = rb_vm_invoke_proc(th, proc,
589  (int)RARRAY_LEN(args), RARRAY_CONST_PTR(args),
591  EXEC_EVENT_HOOK(th, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
592  }
593  else {
594  th->value = (*th->first_func)((void *)args);
595  }
596 }
597 
598 static int
599 thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start)
600 {
601  enum ruby_tag_type state;
602  VALUE args = th->first_args;
603  rb_thread_list_t *join_list;
604  rb_thread_t *main_th;
605  VALUE errinfo = Qnil;
606 # ifdef USE_SIGALTSTACK
607  void rb_register_sigaltstack(rb_thread_t *th);
608 
609  rb_register_sigaltstack(th);
610 # endif
611 
612  if (th == th->vm->main_thread)
613  rb_bug("thread_start_func_2 must not be used for main thread");
614 
615  ruby_thread_set_native(th);
616 
617  th->ec.machine.stack_start = stack_start;
618 #ifdef __ia64
619  th->ec.machine.register_stack_start = register_stack_start;
620 #endif
621  thread_debug("thread start: %p\n", (void *)th);
622 
623  gvl_acquire(th->vm, th);
624  {
625  thread_debug("thread start (get lock): %p\n", (void *)th);
627 
628  TH_PUSH_TAG(th);
629  if ((state = EXEC_TAG()) == TAG_NONE) {
630  SAVE_ROOT_JMPBUF(th, thread_do_start(th, args));
631  }
632  else {
633  errinfo = th->ec.errinfo;
634  if (state == TAG_FATAL) {
635  /* fatal error within this thread, need to stop whole script */
636  }
637  else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
638  /* exit on main_thread. */
639  }
640  else {
641  if (th->report_on_exception) {
642  VALUE mesg = rb_thread_to_s(th->self);
643  rb_str_cat_cstr(mesg, " terminated with exception:\n");
644  rb_write_error_str(mesg);
645  rb_threadptr_error_print(th, errinfo);
646  }
647  if (th->vm->thread_abort_on_exception ||
648  th->abort_on_exception || RTEST(ruby_debug)) {
649  /* exit on main_thread */
650  }
651  else {
652  errinfo = Qnil;
653  }
654  }
655  th->value = Qnil;
656  }
657 
658  th->status = THREAD_KILLED;
659  thread_debug("thread end: %p\n", (void *)th);
660 
661  main_th = th->vm->main_thread;
662  if (main_th == th) {
663  ruby_stop(0);
664  }
665  if (RB_TYPE_P(errinfo, T_OBJECT)) {
666  /* treat with normal error object */
667  rb_threadptr_raise(main_th, 1, &errinfo);
668  }
669  TH_POP_TAG();
670 
671  /* locking_mutex must be Qfalse */
672  if (th->locking_mutex != Qfalse) {
673  rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
674  (void *)th, th->locking_mutex);
675  }
676 
677  /* delete self other than main thread from living_threads */
678  rb_vm_living_threads_remove(th->vm, th);
679  if (main_th->status == THREAD_KILLED && rb_thread_alone()) {
680  /* I'm last thread. wake up main thread from rb_thread_terminate_all */
681  rb_threadptr_interrupt(main_th);
682  }
683 
684  /* wake up joining threads */
685  join_list = th->join_list;
686  while (join_list) {
687  rb_threadptr_interrupt(join_list->th);
688  switch (join_list->th->status) {
690  join_list->th->status = THREAD_RUNNABLE;
691  default: break;
692  }
693  join_list = join_list->next;
694  }
695 
697  rb_check_deadlock(th->vm);
698 
699  rb_thread_recycle_stack_release(th->ec.vm_stack);
700  th->ec.vm_stack = NULL;
701  }
702  native_mutex_lock(&th->vm->thread_destruct_lock);
703  /* make sure vm->running_thread never point me after this point.*/
704  th->vm->running_thread = NULL;
705  native_mutex_unlock(&th->vm->thread_destruct_lock);
706  thread_cleanup_func(th, FALSE);
707  gvl_release(th->vm);
708 
709  return 0;
710 }
711 
712 static VALUE
713 thread_create_core(VALUE thval, VALUE args, VALUE (*fn)(ANYARGS))
714 {
715  rb_thread_t *th = rb_thread_ptr(thval), *current_th = GET_THREAD();
716  int err;
717 
718  if (OBJ_FROZEN(current_th->thgroup)) {
720  "can't start a new thread (frozen ThreadGroup)");
721  }
722 
723  /* setup thread environment */
724  th->first_func = fn;
725  th->first_proc = fn ? Qfalse : rb_block_proc();
726  th->first_args = args; /* GC: shouldn't put before above line */
727 
728  th->priority = current_th->priority;
729  th->thgroup = current_th->thgroup;
730 
733  th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
735 
736  th->interrupt_mask = 0;
737 
738  native_mutex_initialize(&th->interrupt_lock);
740 
741  /* kick thread */
742  err = native_thread_create(th);
743  if (err) {
744  th->status = THREAD_KILLED;
745  rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
746  }
747  rb_vm_living_threads_insert(th->vm, th);
748  return thval;
749 }
750 
751 #define threadptr_initialized(th) ((th)->first_args != 0)
752 
753 /*
754  * call-seq:
755  * Thread.new { ... } -> thread
756  * Thread.new(*args, &proc) -> thread
757  * Thread.new(*args) { |args| ... } -> thread
758  *
759  * Creates a new thread executing the given block.
760  *
761  * Any +args+ given to ::new will be passed to the block:
762  *
763  * arr = []
764  * a, b, c = 1, 2, 3
765  * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
766  * arr #=> [1, 2, 3]
767  *
768  * A ThreadError exception is raised if ::new is called without a block.
769  *
770  * If you're going to subclass Thread, be sure to call super in your
771  * +initialize+ method, otherwise a ThreadError will be raised.
772  */
773 static VALUE
774 thread_s_new(int argc, VALUE *argv, VALUE klass)
775 {
776  rb_thread_t *th;
777  VALUE thread = rb_thread_alloc(klass);
778 
779  if (GET_VM()->main_thread->status == THREAD_KILLED)
780  rb_raise(rb_eThreadError, "can't alloc thread");
781 
782  rb_obj_call_init(thread, argc, argv);
783  th = rb_thread_ptr(thread);
784  if (!threadptr_initialized(th)) {
785  rb_raise(rb_eThreadError, "uninitialized thread - check `%"PRIsVALUE"#initialize'",
786  klass);
787  }
788  return thread;
789 }
790 
791 /*
792  * call-seq:
793  * Thread.start([args]*) {|args| block } -> thread
794  * Thread.fork([args]*) {|args| block } -> thread
795  *
796  * Basically the same as ::new. However, if class Thread is subclassed, then
797  * calling +start+ in that subclass will not invoke the subclass's
798  * +initialize+ method.
799  */
800 
801 static VALUE
802 thread_start(VALUE klass, VALUE args)
803 {
804  return thread_create_core(rb_thread_alloc(klass), args, 0);
805 }
806 
807 /* :nodoc: */
808 static VALUE
809 thread_initialize(VALUE thread, VALUE args)
810 {
811  rb_thread_t *th = rb_thread_ptr(thread);
812 
813  if (!rb_block_given_p()) {
814  rb_raise(rb_eThreadError, "must be called with a block");
815  }
816  else if (th->first_args) {
817  VALUE proc = th->first_proc, loc;
818  if (!proc || !RTEST(loc = rb_proc_location(proc))) {
819  rb_raise(rb_eThreadError, "already initialized thread");
820  }
822  "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
823  RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
824  }
825  else {
826  return thread_create_core(thread, args, 0);
827  }
828 }
829 
830 VALUE
831 rb_thread_create(VALUE (*fn)(ANYARGS), void *arg)
832 {
833  return thread_create_core(rb_thread_alloc(rb_cThread), (VALUE)arg, fn);
834 }
835 
836 
837 /* +infty, for this purpose */
838 #define DELAY_INFTY 1E30
839 
840 struct join_arg {
841  rb_thread_t *target, *waiting;
842  double delay;
843 };
844 
845 static VALUE
846 remove_from_join_list(VALUE arg)
847 {
848  struct join_arg *p = (struct join_arg *)arg;
849  rb_thread_t *target_th = p->target, *th = p->waiting;
850 
851  if (target_th->status != THREAD_KILLED) {
852  rb_thread_list_t **p = &target_th->join_list;
853 
854  while (*p) {
855  if ((*p)->th == th) {
856  *p = (*p)->next;
857  break;
858  }
859  p = &(*p)->next;
860  }
861  }
862 
863  return Qnil;
864 }
865 
866 static VALUE
867 thread_join_sleep(VALUE arg)
868 {
869  struct join_arg *p = (struct join_arg *)arg;
870  rb_thread_t *target_th = p->target, *th = p->waiting;
871  const int forever = p->delay == DELAY_INFTY;
872  const double limit = forever ? 0 : timeofday() + p->delay;
873 
874  while (target_th->status != THREAD_KILLED) {
875  if (forever) {
876  sleep_forever(th, TRUE, FALSE);
877  }
878  else {
879  double now = timeofday();
880  if (now > limit) {
881  thread_debug("thread_join: timeout (thid: %"PRI_THREAD_ID")\n",
882  thread_id_str(target_th));
883  return Qfalse;
884  }
885  sleep_wait_for_interrupt(th, limit - now, 0);
886  }
887  thread_debug("thread_join: interrupted (thid: %"PRI_THREAD_ID")\n",
888  thread_id_str(target_th));
889  }
890  return Qtrue;
891 }
892 
893 static VALUE
894 thread_join(rb_thread_t *target_th, double delay)
895 {
896  rb_thread_t *th = GET_THREAD();
897  struct join_arg arg;
898 
899  if (th == target_th) {
900  rb_raise(rb_eThreadError, "Target thread must not be current thread");
901  }
902  if (GET_VM()->main_thread == target_th) {
903  rb_raise(rb_eThreadError, "Target thread must not be main thread");
904  }
905 
906  arg.target = target_th;
907  arg.waiting = th;
908  arg.delay = delay;
909 
910  thread_debug("thread_join (thid: %"PRI_THREAD_ID")\n", thread_id_str(target_th));
911 
912  if (target_th->status != THREAD_KILLED) {
914  list.next = target_th->join_list;
915  list.th = th;
916  target_th->join_list = &list;
917  if (!rb_ensure(thread_join_sleep, (VALUE)&arg,
918  remove_from_join_list, (VALUE)&arg)) {
919  return Qnil;
920  }
921  }
922 
923  thread_debug("thread_join: success (thid: %"PRI_THREAD_ID")\n",
924  thread_id_str(target_th));
925 
926  if (target_th->ec.errinfo != Qnil) {
927  VALUE err = target_th->ec.errinfo;
928 
929  if (FIXNUM_P(err)) {
930  switch (err) {
931  case INT2FIX(TAG_FATAL):
932  /* OK. killed. */
933  break;
934  default:
935  rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
936  }
937  }
938  else if (THROW_DATA_P(target_th->ec.errinfo)) {
939  rb_bug("thread_join: THROW_DATA should not reach here.");
940  }
941  else {
942  /* normal exception */
943  rb_exc_raise(err);
944  }
945  }
946  return target_th->self;
947 }
948 
949 /*
950  * call-seq:
951  * thr.join -> thr
952  * thr.join(limit) -> thr
953  *
954  * The calling thread will suspend execution and run this +thr+.
955  *
956  * Does not return until +thr+ exits or until the given +limit+ seconds have
957  * passed.
958  *
959  * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
960  * returned.
961  *
962  * Any threads not joined will be killed when the main program exits.
963  *
964  * If +thr+ had previously raised an exception and the ::abort_on_exception or
965  * $DEBUG flags are not set, (so the exception has not yet been processed), it
966  * will be processed at this time.
967  *
968  * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
969  * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
970  * x.join # Let thread x finish, thread a will be killed on exit.
971  * #=> "axyz"
972  *
973  * The following example illustrates the +limit+ parameter.
974  *
975  * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
976  * puts "Waiting" until y.join(0.15)
977  *
978  * This will produce:
979  *
980  * tick...
981  * Waiting
982  * tick...
983  * Waiting
984  * tick...
985  * tick...
986  */
987 
988 static VALUE
989 thread_join_m(int argc, VALUE *argv, VALUE self)
990 {
991  double delay = DELAY_INFTY;
992  VALUE limit;
993 
994  rb_scan_args(argc, argv, "01", &limit);
995  if (!NIL_P(limit)) {
996  delay = rb_num2dbl(limit);
997  }
998 
999  return thread_join(rb_thread_ptr(self), delay);
1000 }
1001 
1002 /*
1003  * call-seq:
1004  * thr.value -> obj
1005  *
1006  * Waits for +thr+ to complete, using #join, and returns its value or raises
1007  * the exception which terminated the thread.
1008  *
1009  * a = Thread.new { 2 + 2 }
1010  * a.value #=> 4
1011  *
1012  * b = Thread.new { raise 'something went wrong' }
1013  * b.value #=> RuntimeError: something went wrong
1014  */
1015 
1016 static VALUE
1017 thread_value(VALUE self)
1018 {
1019  rb_thread_t *th = rb_thread_ptr(self);
1020  thread_join(th, DELAY_INFTY);
1021  return th->value;
1022 }
1023 
1024 /*
1025  * Thread Scheduling
1026  */
1027 
1028 /*
1029  * The type of tv_sec in struct timeval is time_t in POSIX.
1030  * But several systems violate POSIX.
1031  *
1032  * OpenBSD 5.2 (amd64):
1033  * time_t: int (signed 32bit integer)
1034  * tv_sec: long (signed 64bit integer)
1035  *
1036  * MinGW-w64 (x64):
1037  * time_t: long long (signed 64bit integer)
1038  * tv_sec: long (signed 32bit integer)
1039  */
1040 
1041 #if SIGNEDNESS_OF_TIME_T < 0 /* signed */
1042 # define TIMEVAL_SEC_MAX SIGNED_INTEGER_MAX(TYPEOF_TIMEVAL_TV_SEC)
1043 # define TIMEVAL_SEC_MIN SIGNED_INTEGER_MIN(TYPEOF_TIMEVAL_TV_SEC)
1044 #elif SIGNEDNESS_OF_TIME_T > 0 /* unsigned */
1045 # define TIMEVAL_SEC_MAX ((TYPEOF_TIMEVAL_TV_SEC)(~(unsigned_time_t)0))
1046 # define TIMEVAL_SEC_MIN ((TYPEOF_TIMEVAL_TV_SEC)0)
1047 #endif
1048 
1049 static struct timeval
1050 double2timeval(double d)
1051 {
1052  /* assume timeval.tv_sec has same signedness as time_t */
1053  const double TIMEVAL_SEC_MAX_PLUS_ONE = (2*(double)(TIMEVAL_SEC_MAX/2+1));
1054 
1055  struct timeval time;
1056 
1057  if (TIMEVAL_SEC_MAX_PLUS_ONE <= d) {
1058  time.tv_sec = TIMEVAL_SEC_MAX;
1059  time.tv_usec = 999999;
1060  }
1061  else if (d <= TIMEVAL_SEC_MIN) {
1062  time.tv_sec = TIMEVAL_SEC_MIN;
1063  time.tv_usec = 0;
1064  }
1065  else {
1066  time.tv_sec = (TYPEOF_TIMEVAL_TV_SEC)d;
1067  time.tv_usec = (int)((d - (time_t)d) * 1e6);
1068  if (time.tv_usec < 0) {
1069  time.tv_usec += (int)1e6;
1070  time.tv_sec -= 1;
1071  }
1072  }
1073  return time;
1074 }
1075 
1076 static void
1077 sleep_forever(rb_thread_t *th, int deadlockable, int spurious_check)
1078 {
1079  enum rb_thread_status prev_status = th->status;
1080  enum rb_thread_status status = deadlockable ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1081 
1082  th->status = status;
1084  while (th->status == status) {
1085  if (deadlockable) {
1086  th->vm->sleeper++;
1087  rb_check_deadlock(th->vm);
1088  }
1089  native_sleep(th, 0);
1090  if (deadlockable) {
1091  th->vm->sleeper--;
1092  }
1094  if (!spurious_check)
1095  break;
1096  }
1097  th->status = prev_status;
1098 }
1099 
1100 static void
1101 getclockofday(struct timeval *tp)
1102 {
1103 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1104  struct timespec ts;
1105 
1106  if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
1107  tp->tv_sec = ts.tv_sec;
1108  tp->tv_usec = (int)(ts.tv_nsec / 1000);
1109  }
1110  else
1111 #endif
1112  {
1113  gettimeofday(tp, NULL);
1114  }
1115 }
1116 
1117 static void
1118 sleep_timeval(rb_thread_t *th, struct timeval tv, int spurious_check)
1119 {
1120  struct timeval to, tvn;
1121  enum rb_thread_status prev_status = th->status;
1122 
1123  getclockofday(&to);
1124  if (TIMEVAL_SEC_MAX - tv.tv_sec < to.tv_sec)
1125  to.tv_sec = TIMEVAL_SEC_MAX;
1126  else
1127  to.tv_sec += tv.tv_sec;
1128  if ((to.tv_usec += tv.tv_usec) >= 1000000) {
1129  if (to.tv_sec == TIMEVAL_SEC_MAX)
1130  to.tv_usec = 999999;
1131  else {
1132  to.tv_sec++;
1133  to.tv_usec -= 1000000;
1134  }
1135  }
1136 
1137  th->status = THREAD_STOPPED;
1139  while (th->status == THREAD_STOPPED) {
1140  native_sleep(th, &tv);
1142  getclockofday(&tvn);
1143  if (to.tv_sec < tvn.tv_sec) break;
1144  if (to.tv_sec == tvn.tv_sec && to.tv_usec <= tvn.tv_usec) break;
1145  thread_debug("sleep_timeval: %"PRI_TIMET_PREFIX"d.%.6ld > %"PRI_TIMET_PREFIX"d.%.6ld\n",
1146  (time_t)to.tv_sec, (long)to.tv_usec,
1147  (time_t)tvn.tv_sec, (long)tvn.tv_usec);
1148  tv.tv_sec = to.tv_sec - tvn.tv_sec;
1149  if ((tv.tv_usec = to.tv_usec - tvn.tv_usec) < 0) {
1150  --tv.tv_sec;
1151  tv.tv_usec += 1000000;
1152  }
1153  if (!spurious_check)
1154  break;
1155  }
1156  th->status = prev_status;
1157 }
1158 
1159 void
1161 {
1162  thread_debug("rb_thread_sleep_forever\n");
1163  sleep_forever(GET_THREAD(), FALSE, TRUE);
1164 }
1165 
1166 void
1168 {
1169  thread_debug("rb_thread_sleep_deadly\n");
1170  sleep_forever(GET_THREAD(), TRUE, TRUE);
1171 }
1172 
1173 static void
1174 rb_thread_sleep_deadly_allow_spurious_wakeup(void)
1175 {
1176  thread_debug("rb_thread_sleep_deadly_allow_spurious_wakeup\n");
1177  sleep_forever(GET_THREAD(), TRUE, FALSE);
1178 }
1179 
/*
 * Current time in seconds as a double.  Prefers the monotonic clock
 * (immune to wall-clock adjustments) when available; otherwise falls
 * back to gettimeofday() wall time.
 */
static double
timeofday(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec now;

    if (clock_gettime(CLOCK_MONOTONIC, &now) == 0) {
        return (double)now.tv_sec + (double)now.tv_nsec * 1e-9;
    }
#endif
    {
        struct timeval now_tv;

        gettimeofday(&now_tv, NULL);
        return (double)now_tv.tv_sec + (double)now_tv.tv_usec * 1e-6;
    }
}
1197 
1198 static void
1199 sleep_wait_for_interrupt(rb_thread_t *th, double sleepsec, int spurious_check)
1200 {
1201  sleep_timeval(th, double2timeval(sleepsec), spurious_check);
1202 }
1203 
/* Sleep the current thread for the given timeval, retrying on spurious
 * wakeups (third argument 1).
 * NOTE(review): the declarator line (orig. 1205) was dropped by the doc
 * extraction; the body's use of `time` matches upstream
 * rb_thread_wait_for(struct timeval time) -- confirm. */
void
{
    rb_thread_t *th = GET_THREAD();
    sleep_timeval(th, time, 1);
}
1210 
1211 /*
1212  * CAUTION: This function causes thread switching.
1213  * rb_thread_check_ints() check ruby's interrupts.
1214  * some interrupt needs thread switching/invoke handlers,
1215  * and so on.
1216  */
1217 
/* Check and run pending interrupts for the current thread (may switch
 * threads; see the CAUTION comment above).
 * NOTE(review): the declarator (orig. 1219) and the line performing the
 * actual interrupt check (orig. 1222) were dropped by the doc
 * extraction; upstream this is rb_thread_check_ints(void) -- confirm. */
void
{
    rb_thread_t *th = GET_THREAD();
}
1224 
1225 /*
1226  * Hidden API for tcl/tk wrapper.
1227  * There is no guarantee to perpetuate it.
1228  */
/* Nonzero iff buffered, not-yet-delivered signals exist.
 * NOTE(review): the declarator (orig. 1230) was dropped by the doc
 * extraction; upstream this is rb_thread_check_trap_pending(void). */
int
{
    return rb_signal_buff_size() != 0;
}
1234 
1235 /* This function can be called in blocking region. */
/* Return the thread's interrupt flag as an int; per the comment above,
 * callable without the GVL.
 * NOTE(review): the declarator (orig. 1237) was dropped by the doc
 * extraction; the body's use of `thval` matches upstream
 * rb_thread_interrupted(VALUE thval) -- confirm. */
int
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval));
}
1241 
/* NOTE(review): both the declarator (orig. 1243) and the body line
 * (orig. 1245) were dropped by the doc extraction; by position in the
 * file this is presumably rb_thread_sleep(int) delegating to
 * rb_thread_wait_for() -- confirm against upstream thread.c. */
void
{
}
1247 
/*
 * Yield the GVL to another thread, but only when other threads exist and
 * this thread has already consumed at least `limits_us` microseconds of
 * its time slice.
 * NOTE(review): lines orig. 1257 and 1259 were dropped by the doc
 * extraction; upstream they bracket gvl_yield() with machine-context
 * save and current-thread restore -- confirm against the source.
 */
static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    thread_debug("rb_thread_schedule\n");
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();

        if (th->running_time_us >= limits_us) {
            thread_debug("rb_thread_schedule/switch start\n");
            gvl_yield(th->vm, th);
            thread_debug("rb_thread_schedule/switch done\n");
        }
    }
}
1264 
/* Unconditionally offer the GVL to other threads (limit 0), then run any
 * pending interrupts for the current thread.
 * NOTE(review): the declarator (orig. 1266) was dropped by the doc
 * extraction; upstream this is rb_thread_schedule(void) -- confirm. */
void
{
    rb_thread_t *cur_th = GET_THREAD();
    rb_thread_schedule_limits(0);

    RUBY_VM_CHECK_INTS(cur_th);
}
1273 
1274 /* blocking region */
1275 
/*
 * Enter a blocking region: register the unblock function `ubf(arg)`,
 * mark the thread stopped, and release the GVL.  Returns TRUE on
 * success.  Returns FALSE -- with nothing changed -- when
 * `fail_if_interrupted` is set and unblock_function_set() refuses
 * (an interrupt is already pending).
 * NOTE(review): line orig. 1285 was dropped by the doc extraction;
 * upstream it saves the machine context before gvl_release() -- confirm.
 */
static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
    /* Remember the pre-region status so blocking_region_end() can restore it. */
    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        thread_debug("enter blocking region (%p)\n", (void *)th);
        gvl_release(th->vm);
        return TRUE;
    }
    else {
        return FALSE;
    }
}
1293 
/*
 * Leave a blocking region: re-acquire the GVL, unregister the unblock
 * function, and restore the pre-region thread status.
 * NOTE(review): line orig. 1298 was dropped by the doc extraction;
 * upstream it re-establishes this thread as current after gvl_acquire()
 * -- confirm against the source.
 */
static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    gvl_acquire(th->vm, th);
    thread_debug("leave blocking region (%p)\n", (void *)th);
    unregister_ubf_list(th);
    th->blocking_region_buffer = 0;
    unblock_function_clear(th);
    /* Only restore when still STOPPED; the status may have been changed
     * while this thread was blocking. */
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }
}
1307 
/*
 * Common implementation of rb_thread_call_without_gvl{,2}: run
 * func(data1) inside a blocking region (GVL released), preserving errno
 * across the region.  The sentinels RUBY_UBF_IO / RUBY_UBF_PROCESS are
 * replaced by the built-in ubf_select with the thread as its argument.
 * NOTE(review): the body of the !fail_if_interrupted branch (orig. 1328)
 * was dropped by the doc extraction; upstream it re-checks interrupts
 * after the region -- confirm against the source.
 */
static void *
call_without_gvl(void *(*func)(void *), void *data1,
                 rb_unblock_function_t *ubf, void *data2, int fail_if_interrupted)
{
    void *val = 0;

    rb_thread_t *th = GET_THREAD();
    int saved_errno = 0;

    if (ubf == RUBY_UBF_IO || ubf == RUBY_UBF_PROCESS) {
        ubf = ubf_select;
        data2 = th;
    }

    BLOCKING_REGION({
        val = func(data1);
        /* Capture errno before anything in the region teardown clobbers it. */
        saved_errno = errno;
    }, ubf, data2, fail_if_interrupted);

    if (!fail_if_interrupted) {
    }

    errno = saved_errno;

    return val;
}
1335 
1336 /*
1337  * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1338  * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1339  * without interrupt process.
1340  *
1341  * rb_thread_call_without_gvl() does:
1342  * (1) Check interrupts.
1343  * (2) release GVL.
1344  * Other Ruby threads may run in parallel.
1345  * (3) call func with data1
1346  * (4) acquire GVL.
1347  * Other Ruby threads can not run in parallel any more.
1348  * (5) Check interrupts.
1349  *
1350  * rb_thread_call_without_gvl2() does:
1351  * (1) Check interrupt and return if interrupted.
1352  * (2) release GVL.
1353  * (3) call func with data1 and a pointer to the flags.
1354  * (4) acquire GVL.
1355  *
1356  * If another thread interrupts this thread (Thread#kill, signal delivery,
1357  * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1358  * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1359  * toggling a cancellation flag, canceling the invocation of a call inside
1360  * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1361  *
1362  * There are built-in ubfs and you can specify these ubfs:
1363  *
1364  * * RUBY_UBF_IO: ubf for IO operation
1365  * * RUBY_UBF_PROCESS: ubf for process operation
1366  *
1367  * However, we can not guarantee our built-in ubfs interrupt your `func()'
1368  * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1369  * provide proper ubf(), your program will not stop for Control+C or other
1370  * shutdown events.
1371  *
1372  * "Check interrupts" on above list means checking asynchronous
1373  * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1374  * request, and so on) and calling corresponding procedures
1375  * (such as `trap' for signals, raise an exception for Thread#raise).
1376  * If `func()' finished and received interrupts, you may skip interrupt
1377  * checking. For example, assume the following func() reads data from a file.
1378  *
1379  * read_func(...) {
1380  * // (a) before read
1381  * read(buffer); // (b) reading
1382  * // (c) after read
1383  * }
1384  *
1385  * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1386  * `read_func()' and interrupts are checked. However, if an interrupt occurs
1387  * at (c), after *read* operation is completed, checking interrupts is harmful
1388  * because it causes irrevocable side-effect, the read data will vanish. To
1389  * avoid such problem, the `read_func()' should be used with
1390  * `rb_thread_call_without_gvl2()'.
1391  *
1392  * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1393  * immediately. This function does not show when the execution was interrupted.
1394  * For example, there are 4 possible timing (a), (b), (c) and before calling
1395  * read_func(). You need to record progress of a read_func() and check
1396  * the progress after `rb_thread_call_without_gvl2()'. You may need to call
1397  * `rb_thread_check_ints()' at appropriate points, or your program will not
1398  * be able to handle events such as `trap' in a timely manner.
1399  *
1400  * NOTE: You can not execute most of Ruby C API and touch Ruby
1401  * objects in `func()' and `ubf()', including raising an
1402  * exception, because current thread doesn't acquire GVL
1403  * (it causes synchronization problems). If you need to
1404  * call ruby functions either use rb_thread_call_with_gvl()
1405  * or read source code of C APIs and confirm safety by
1406  * yourself.
1407  *
1408  * NOTE: In short, this API is difficult to use safely.  Prefer another
1409  * approach when one exists; collective experience with this API is limited.
1410  * Please report any problems you encounter with it.
1411  *
1412  * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1413  * for a short running `func()'. Be sure to benchmark and use this
1414  * mechanism when `func()' consumes enough time.
1415  *
1416  * Safe C API:
1417  * * rb_thread_interrupted() - check interrupt flag
1418  * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1419  * they will work without GVL, and may acquire GVL when GC is needed.
1420  */
1421 void *
1422 rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1423  rb_unblock_function_t *ubf, void *data2)
1424 {
1425  return call_without_gvl(func, data1, ubf, data2, TRUE);
1426 }
1427 
1428 void *
1429 rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1430  rb_unblock_function_t *ubf, void *data2)
1431 {
1432  return call_without_gvl(func, data1, ubf, data2, FALSE);
1433 }
1434 
/*
 * Run `func(data1)` with the GVL released while `fd` is registered in
 * the VM-wide waiting_fds list, so rb_notify_fd_close() can find and
 * interrupt this thread if the descriptor is closed.  errno is preserved
 * across the region; a non-local exit re-raises only after the fd entry
 * is unlinked.
 * NOTE(review): the declarator (orig. 1436) and a post-region interrupt
 * check (orig. 1465) were dropped by the doc extraction; upstream this
 * is rb_thread_io_blocking_region(rb_blocking_function_t *func,
 * void *data1, int fd) -- confirm against the source.
 */
VALUE
{
    volatile VALUE val = Qundef; /* shouldn't be used */
    rb_vm_t *vm = GET_VM();
    rb_thread_t *th = GET_THREAD();
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    struct waiting_fd wfd;

    wfd.fd = fd;
    wfd.th = th;
    list_add(&vm->waiting_fds, &wfd.wfd_node);

    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == TAG_NONE) {
        BLOCKING_REGION({
            val = func(data1);
            saved_errno = errno;
        }, ubf_select, th, FALSE);
    }
    TH_POP_TAG();

    /* must be deleted before jump */
    list_del(&wfd.wfd_node);

    if (state) {
        TH_JUMP_TAG(th, state);
    }
    /* TODO: check func() */

    errno = saved_errno;

    return val;
}
1471 
1472 /*
1473  * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1474  *
1475  * After releasing GVL using
1476  * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1477  * methods. If you need to access Ruby you must use this function
1478  * rb_thread_call_with_gvl().
1479  *
1480  * This function rb_thread_call_with_gvl() does:
1481  * (1) acquire GVL.
1482  * (2) call passed function `func'.
1483  * (3) release GVL.
1484  * (4) return a value which is returned at (2).
1485  *
1486  * NOTE: You should not return Ruby object at (2) because such Object
1487  * will not be marked.
1488  *
1489  * NOTE: If an exception is raised in `func', this function DOES NOT
1490  * protect (catch) the exception. If you have any resources
1491  * which should free before throwing exception, you need use
1492  * rb_protect() in `func' and return a value which represents
1493  * exception was raised.
1494  *
1495  * NOTE: This function should not be called by a thread which was not
1496  * created as Ruby thread (created by Thread.new or so). In other
1497  * words, this function *DOES NOT* associate or convert a NON-Ruby
1498  * thread to a Ruby thread.
1499  */
void *
rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
{
    rb_thread_t *th = ruby_thread_from_native();
    struct rb_blocking_region_buffer *brb;
    struct rb_unblock_callback prev_unblock;
    void *r;

    if (th == 0) {
        /* Error has occurred, but we can't use rb_bug()
         * because this thread is not Ruby's thread.
         * What should we do?
         */

        fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
        exit(EXIT_FAILURE);
    }

    /* NOTE(review): the line loading `brb` from th->blocking_region_buffer
     * (orig. 1518) was dropped by the doc extraction; as shown, `brb`
     * would be read uninitialized below -- restore from upstream thread.c. */
    prev_unblock = th->unblock;

    if (brb == 0) {
        /* Not inside a blocking region: the caller already holds the GVL. */
        rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
    }

    blocking_region_end(th, brb);
    /* enter to Ruby world: You can access Ruby values, methods and so on. */
    r = (*func)(data1);
    /* leave from Ruby world: You can not access Ruby values, etc. */
    blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
    return r;
}
1532 
1533 /*
1534  * ruby_thread_has_gvl_p - check if current native thread has GVL.
1535  *
1536  ***
1537  *** This API is EXPERIMENTAL!
1538  *** We do not guarantee that this API remains in ruby 1.9.2 or later.
1539  ***
1540  */
1541 
/* Nonzero iff the calling native thread is a Ruby thread currently
 * holding the GVL (i.e. not inside a blocking region).
 * NOTE(review): the declarator (orig. 1543) was dropped by the doc
 * extraction; upstream this is ruby_thread_has_gvl_p(void). */
int
{
    rb_thread_t *th = ruby_thread_from_native();

    if (th && th->blocking_region_buffer == 0) {
        return 1;
    }
    else {
        return 0;
    }
}
1554 
1555 /*
1556  * call-seq:
1557  * Thread.pass -> nil
1558  *
1559  * Give the thread scheduler a hint to pass execution to another thread.
1560  * A running thread may or may not switch, it depends on OS and processor.
1561  */
1562 
/* Thread.pass implementation.
 * NOTE(review): the body line (orig. 1566) was dropped by the doc
 * extraction; upstream it calls rb_thread_schedule() -- confirm. */
static VALUE
thread_s_pass(VALUE klass)
{
    return Qnil;
}
1569 
1570 /*****************************************************/
1571 
1572 /*
1573  * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1574  *
1575  * Async events such as an exception thrown by Thread#raise,
1576  * Thread#kill and thread termination (after main thread termination)
1577  * will be queued to th->pending_interrupt_queue.
1578  * - clear: clear the queue.
1579  * - enque: enqueue err object into queue.
1580  * - deque: dequeue err object from queue.
1581  * - active_p: return 1 if the queue should be checked.
1582  *
1583  * All rb_threadptr_pending_interrupt_* functions are called by
1584  * a GVL acquired thread, of course.
1585  * Note that all "rb_" prefix APIs need GVL to call.
1586  */
1587 
/* NOTE(review): the declarator (orig. 1589) and body (orig. 1591) were
 * dropped by the doc extraction; per the section comment above this is
 * rb_threadptr_pending_interrupt_clear() ("clear: clear the queue") --
 * restore from upstream thread.c. */
void
{
}
1593 
/* NOTE(review): the declarator (orig. 1595) and body (orig. 1597-1598)
 * were dropped by the doc extraction; per the section comment above this
 * is rb_threadptr_pending_interrupt_enque() ("enque: enqueue err object
 * into queue") -- restore from upstream thread.c. */
void
{
}
1600 
1601 static void
1602 threadptr_check_pending_interrupt_queue(rb_thread_t *th)
1603 {
1604  if (!th->pending_interrupt_queue) {
1605  rb_raise(rb_eThreadError, "uninitialized thread");
1606  }
1607 }
1608 
1614 };
1615 
/*
 * Decide how (and whether) pending error class `err` may be delivered to
 * `th`, according to the thread's stack of Thread.handle_interrupt
 * masks.  Masks are consulted newest-first; within a mask, the first
 * entry matching err or one of its ancestors decides.  Returns
 * INTERRUPT_NONE when no mask mentions err (callers treat that as
 * "deliver immediately").
 */
static enum handle_interrupt_timing
rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
{
    VALUE mask;
    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
    const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
    VALUE mod;
    long i;

    /* Iterate the mask stack from the most recently pushed mask down. */
    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];

        /* Walk err's superclass chain so a mask keyed on an ancestor
         * (e.g. Exception) also matches. */
        for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
            VALUE klass = mod;
            VALUE sym;

            if (BUILTIN_TYPE(mod) == T_ICLASS) {
                /* Include proxy: look up the mask by the real module. */
                klass = RBASIC(mod)->klass;
            }
            else if (mod != RCLASS_ORIGIN(mod)) {
                /* Skip entries whose origin differs (e.g. prepended) --
                 * the origin iclass will be visited instead. */
                continue;
            }

            if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
                if (sym == sym_immediate) {
                    return INTERRUPT_IMMEDIATE;
                }
                else if (sym == sym_on_blocking) {
                    return INTERRUPT_ON_BLOCKING;
                }
                else if (sym == sym_never) {
                    return INTERRUPT_NEVER;
                }
                else {
                    /* Mask values are validated at push time; anything
                     * else indicates corruption. */
                    rb_raise(rb_eThreadError, "unknown mask signature");
                }
            }
        }
        /* try to next mask */
    }
    return INTERRUPT_NONE;
}
1658 
1659 static int
1660 rb_threadptr_pending_interrupt_empty_p(rb_thread_t *th)
1661 {
1662  return RARRAY_LEN(th->pending_interrupt_queue) == 0;
1663 }
1664 
/* TRUE iff any queued pending-interrupt exception matches class `err`
 * (inheritance-aware via rb_class_inherited_p). */
static int
rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
{
    int i;
    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
        /* NOTE(review): the line defining `e` (orig. 1670, fetching
         * element i of the queue) was dropped by the doc extraction --
         * restore from upstream thread.c. */
        if (rb_class_inherited_p(e, err)) {
            return TRUE;
        }
    }
    return FALSE;
}
1677 
/*
 * Remove and return the first queued pending-interrupt exception whose
 * handle_interrupt mask permits delivery at `timing`; Qundef when none
 * is deliverable.  Unmasked (INTERRUPT_NONE) and INTERRUPT_IMMEDIATE
 * entries are always deliverable; INTERRUPT_ON_BLOCKING only when
 * `timing` is ON_BLOCKING; INTERRUPT_NEVER entries stay queued.
 * NOTE(review): several lines (orig. 1685, 1697, 1704, 1707, 1709) were
 * dropped by the doc extraction: the fetch of `err` from the queue, its
 * removal on delivery, the queue-checked bookkeeping, and the body of
 * the #else branch -- restore from upstream thread.c.
 */
static VALUE
rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
{
#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
    int i;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {

        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            if (timing != INTERRUPT_ON_BLOCKING) {
                break;
            }
            /* fall through */
          case INTERRUPT_NONE: /* default: IMMEDIATE */
          case INTERRUPT_IMMEDIATE:
            return err;
          case INTERRUPT_NEVER:
            break;
        }
    }

    return Qundef;
#else
    if (rb_threadptr_pending_interrupt_empty_p(th)) {
    }
    return err;
#endif
}
1714 
/* Nonzero iff th's pending-interrupt queue should be examined now.
 * NOTE(review): the declarator (orig. 1716) and the short-circuit
 * condition line (orig. 1723) were dropped by the doc extraction;
 * upstream this is rb_threadptr_pending_interrupt_active_p(rb_thread_t *)
 * returning 0 when the queue was already checked -- confirm. */
int
{
    /*
     * For optimization, we don't check async errinfo queue
     * if the queue and the thread interrupt mask were not changed
     * since last check.
     */
        return 0;
    }

    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        return 0;
    }

    return 1;
}
1733 
1734 static int
1735 handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
1736 {
1737  VALUE *maskp = (VALUE *)args;
1738 
1739  if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
1740  rb_raise(rb_eArgError, "unknown mask signature");
1741  }
1742 
1743  if (!*maskp) {
1744  *maskp = rb_ident_hash_new();
1745  }
1746  rb_hash_aset(*maskp, key, val);
1747 
1748  return ST_CONTINUE;
1749 }
1750 
1751 /*
1752  * call-seq:
1753  * Thread.handle_interrupt(hash) { ... } -> result of the block
1754  *
1755  * Changes asynchronous interrupt timing.
1756  *
1757  * _interrupt_ means asynchronous event and corresponding procedure
1758  * by Thread#raise, Thread#kill, signal trap (not supported yet)
1759  * and main thread termination (if main thread terminates, then all
1760  * other thread will be killed).
1761  *
1762  * The given +hash+ has pairs like <code>ExceptionClass =>
1763  * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
1764  * the given block. The TimingSymbol can be one of the following symbols:
1765  *
1766  * [+:immediate+] Invoke interrupts immediately.
1767  * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
1768  * [+:never+] Never invoke all interrupts.
1769  *
1770  * _BlockingOperation_ means that the operation will block the calling thread,
1771  * such as read and write. On CRuby implementation, _BlockingOperation_ is any
1772  * operation executed without GVL.
1773  *
1774  * Masked asynchronous interrupts are delayed until they are enabled.
1775  * This method is similar to sigprocmask(3).
1776  *
1777  * === NOTE
1778  *
1779  * Asynchronous interrupts are difficult to use.
1780  *
1781  * If you need to communicate between threads, please consider to use another way such as Queue.
1782  *
1783  * Or use them with deep understanding about this method.
1784  *
1785  * === Usage
1786  *
1787  * In this example, we can guard from Thread#raise exceptions.
1788  *
1789  * Using the +:never+ TimingSymbol the RuntimeError exception will always be
1790  * ignored in the first block of the main thread. In the second
1791  * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
1792  *
1793  * th = Thread.new do
1794  * Thread.handle_interrupt(RuntimeError => :never) {
1795  * begin
1796  * # You can write resource allocation code safely.
1797  * Thread.handle_interrupt(RuntimeError => :immediate) {
1798  * # ...
1799  * }
1800  * ensure
1801  * # You can write resource deallocation code safely.
1802  * end
1803  * }
1804  * end
1805  * Thread.pass
1806  * # ...
1807  * th.raise "stop"
1808  *
1809  * While we are ignoring the RuntimeError exception, it's safe to write our
1810  * resource allocation code. Then, the ensure block is where we can safely
1811  * deallocate your resources.
1812  *
1813  * ==== Guarding from Timeout::Error
1814  *
1815  * In the next example, we will guard from the Timeout::Error exception. This
1816  * will help prevent from leaking resources when Timeout::Error exceptions occur
1817  * during normal ensure clause. For this example we use the help of the
1818  * standard library Timeout, from lib/timeout.rb
1819  *
1820  * require 'timeout'
1821  * Thread.handle_interrupt(Timeout::Error => :never) {
1822  * timeout(10){
1823  * # Timeout::Error doesn't occur here
1824  * Thread.handle_interrupt(Timeout::Error => :on_blocking) {
1825  * # possible to be killed by Timeout::Error
1826  * # while blocking operation
1827  * }
1828  * # Timeout::Error doesn't occur here
1829  * }
1830  * }
1831  *
1832  * In the first part of the +timeout+ block, we can rely on Timeout::Error being
1833  * ignored. Then in the <code>Timeout::Error => :on_blocking</code> block, any
1834  * operation that will block the calling thread is susceptible to a
1835  * Timeout::Error exception being raised.
1836  *
1837  * ==== Stack control settings
1838  *
1839  * It's possible to stack multiple levels of ::handle_interrupt blocks in order
1840  * to control more than one ExceptionClass and TimingSymbol at a time.
1841  *
1842  * Thread.handle_interrupt(FooError => :never) {
1843  * Thread.handle_interrupt(BarError => :never) {
1844  * # FooError and BarError are prohibited.
1845  * }
1846  * }
1847  *
1848  * ==== Inheritance with ExceptionClass
1849  *
1850  * All exceptions inherited from the ExceptionClass parameter will be considered.
1851  *
1852  * Thread.handle_interrupt(Exception => :never) {
1853  * # all exceptions inherited from Exception are prohibited.
1854  * }
1855  *
1856  */
/*
 * Thread.handle_interrupt implementation: validate and freeze the mask
 * hash, install it for the duration of the block, then re-check
 * interrupts so deferred events fire at the boundary.
 * NOTE(review): the lines that push/pop the mask on the thread's
 * pending_interrupt_mask_stack and reset the queue-checked flag
 * (orig. 1876, 1878-1879, 1888, 1890-1891) were dropped by the doc
 * extraction -- restore from upstream thread.c.
 */
static VALUE
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{
    VALUE mask;
    rb_thread_t *th = GET_THREAD();
    volatile VALUE r = Qnil;
    enum ruby_tag_type state;

    if (!rb_block_given_p()) {
        rb_raise(rb_eArgError, "block is needed.");
    }

    mask = 0;
    mask_arg = rb_convert_type_with_id(mask_arg, T_HASH, "Hash", idTo_hash);
    rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
    if (!mask) {
        /* Empty mask: nothing to install, just yield. */
        return rb_yield(Qnil);
    }
    OBJ_FREEZE_RAW(mask);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
    }

    /* Run the block under the new mask, capturing any non-local exit. */
    TH_PUSH_TAG(th);
    if ((state = EXEC_TAG()) == TAG_NONE) {
        r = rb_yield(Qnil);
    }
    TH_POP_TAG();

    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
    }

    /* Deliver interrupts that became enabled when the mask was removed. */
    RUBY_VM_CHECK_INTS(th);

    if (state) {
        TH_JUMP_TAG(th, state);
    }

    return r;
}
1902 
1903 /*
1904  * call-seq:
1905  * target_thread.pending_interrupt?(error = nil) -> true/false
1906  *
1907  * Returns whether or not the asynchronous queue is empty for the target thread.
1908  *
1909  * If +error+ is given, then check only for +error+ type deferred events.
1910  *
1911  * See ::pending_interrupt? for more information.
1912  */
1913 static VALUE
1914 rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
1915 {
1916  rb_thread_t *target_th = rb_thread_ptr(target_thread);
1917 
1918  if (!target_th->pending_interrupt_queue) {
1919  return Qfalse;
1920  }
1921  if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
1922  return Qfalse;
1923  }
1924  else {
1925  if (argc == 1) {
1926  VALUE err;
1927  rb_scan_args(argc, argv, "01", &err);
1928  if (!rb_obj_is_kind_of(err, rb_cModule)) {
1929  rb_raise(rb_eTypeError, "class or module required for rescue clause");
1930  }
1931  if (rb_threadptr_pending_interrupt_include_p(target_th, err)) {
1932  return Qtrue;
1933  }
1934  else {
1935  return Qfalse;
1936  }
1937  }
1938  return Qtrue;
1939  }
1940 }
1941 
1942 /*
1943  * call-seq:
1944  * Thread.pending_interrupt?(error = nil) -> true/false
1945  *
1946  * Returns whether or not the asynchronous queue is empty.
1947  *
1948  * Since Thread::handle_interrupt can be used to defer asynchronous events,
1949  * this method can be used to determine if there are any deferred events.
1950  *
1951  * If you find this method returns true, then you may finish +:never+ blocks.
1952  *
1953  * For example, the following method processes deferred asynchronous events
1954  * immediately.
1955  *
1956  * def Thread.kick_interrupt_immediately
1957  * Thread.handle_interrupt(Object => :immediate) {
1958  * Thread.pass
1959  * }
1960  * end
1961  *
1962  * If +error+ is given, then check only for +error+ type deferred events.
1963  *
1964  * === Usage
1965  *
1966  * th = Thread.new{
1967  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1968  * while true
1969  * ...
1970  * # reach safe point to invoke interrupt
1971  * if Thread.pending_interrupt?
1972  * Thread.handle_interrupt(Object => :immediate){}
1973  * end
1974  * ...
1975  * end
1976  * }
1977  * }
1978  * ...
1979  * th.raise # stop thread
1980  *
1981  * This example can also be written as the following, which you should use to
1982  * avoid asynchronous interrupts.
1983  *
1984  * flag = true
1985  * th = Thread.new{
1986  * Thread.handle_interrupt(RuntimeError => :on_blocking){
1987  * while true
1988  * ...
1989  * # reach safe point to invoke interrupt
1990  * break if flag == false
1991  * ...
1992  * end
1993  * }
1994  * }
1995  * ...
1996  * flag = false # stop thread
1997  */
1998 
1999 static VALUE
2000 rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2001 {
2002  return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2003 }
2004 
/* Mark `th` as dying and unwind it via a TAG_FATAL jump (does not
 * return).
 * NOTE(review): line orig. 2008 was dropped by the doc extraction;
 * upstream it clears the pending-interrupt queue before the jump --
 * confirm against the source. */
static void
rb_threadptr_to_kill(rb_thread_t *th)
{
    th->status = THREAD_RUNNABLE;
    th->to_kill = 1;
    th->ec.errinfo = INT2FIX(TAG_FATAL);
    TH_JUMP_TAG(th, TAG_FATAL);
}
2014 
/*
 * Atomically fetch-and-clear the thread's deliverable interrupt bits.
 * The CAS loop retries until interrupt_flag is replaced by
 * (flag & interrupt_mask): masked bits remain pending while unmasked
 * bits are consumed.  Returns the consumed (unmasked) bits.
 */
static inline rb_atomic_t
threadptr_get_interrupts(rb_thread_t *th)
{
    rb_atomic_t interrupt;
    rb_atomic_t old;

    do {
        interrupt = th->interrupt_flag;
        old = ATOMIC_CAS(th->interrupt_flag, interrupt, interrupt & th->interrupt_mask);
    } while (old != interrupt);  /* retry if another writer raced us */
    return interrupt & (rb_atomic_t)~th->interrupt_mask;
}
2027 
/*
 * Drain and handle all deliverable interrupts for `th`: postponed jobs,
 * trap (signal) handlers (main thread only), pending cross-thread
 * exceptions, and timer-driven rescheduling.  Loops until
 * threadptr_get_interrupts() reports no more unmasked bits.
 * NOTE(review): the declarator (orig. 2029; the body implies a
 * rb_thread_t *th and a blocking_timing flag parameter) plus lines
 * orig. 2048 (postponed-job branch body), 2081 (second half of the
 * status condition), and 2098 (first half of an event-hook call) were
 * dropped by the doc extraction -- restore from upstream thread.c.
 */
void
{
    rb_atomic_t interrupt;
    int postponed_job_interrupt = 0;

    /* Don't process interrupts while this thread is mid-raise. */
    if (th->ec.raised_flag) return;

    while ((interrupt = threadptr_get_interrupts(th)) != 0) {
        int sig;
        int timer_interrupt;
        int pending_interrupt;
        int trap_interrupt;

        timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
        pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
        postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
        trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;

        if (postponed_job_interrupt) {
        }

        /* signal handling */
        if (trap_interrupt && (th == th->vm->main_thread)) {
            enum rb_thread_status prev_status = th->status;
            th->status = THREAD_RUNNABLE;
            while ((sig = rb_get_next_signal()) != 0) {
                rb_signal_exec(th, sig);
            }
            th->status = prev_status;
        }

        /* exception from another thread */
        if (pending_interrupt && rb_threadptr_pending_interrupt_active_p(th)) {
            VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
            thread_debug("rb_thread_execute_interrupts: %"PRIdVALUE"\n", err);

            if (err == Qundef) {
                /* no error */
            }
            else if (err == eKillSignal /* Thread#kill received */ ||
                     err == eTerminateSignal /* Terminate thread */ ||
                     err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
                rb_threadptr_to_kill(th);
            }
            else {
                if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
                    /* the only special exception to be queued across thread */
                    err = ruby_vm_special_exception_copy(err);
                }
                /* set runnable if th was slept. */
                if (th->status == THREAD_STOPPED ||
                th->status = THREAD_RUNNABLE;
                rb_exc_raise(err);
            }
        }

        if (timer_interrupt) {
            uint32_t limits_us = TIME_QUANTUM_USEC;

            /* Higher priority stretches the time slice; lower shrinks it. */
            if (th->priority > 0)
                limits_us <<= th->priority;
            else
                limits_us >>= -th->priority;

            if (th->status == THREAD_RUNNABLE)
                th->running_time_us += TIME_QUANTUM_USEC;

            0, 0, 0, Qundef);

            rb_thread_schedule_limits(limits_us);
        }
    }
}
2105 
/* Public wrapper: execute pending interrupts for `thval` with blocking
 * timing (second argument 1).
 * NOTE(review): the declarator (orig. 2107) was dropped by the doc
 * extraction; the body's use of `thval` matches upstream
 * rb_thread_execute_interrupts(VALUE thval) -- confirm. */
void
{
    rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
}
2111 
/* Poke `th` so a sleeping thread wakes up and re-examines its state.
 * NOTE(review): the body line (orig. 2115) was dropped by the doc
 * extraction; upstream it interrupts the target thread -- confirm. */
static void
rb_threadptr_ready(rb_thread_t *th)
{
}
2117 
2118 void rb_threadptr_setup_exception(rb_thread_t *th, VALUE mesg, VALUE cause);
2119 
/*
 * Build an exception from argc/argv (a bare RuntimeError when argc == 0)
 * and deliver it to `th`.  Returns Qnil; silently does nothing if the
 * target is dead.
 * NOTE(review): lines orig. 2142-2144 were dropped by the doc
 * extraction; upstream they hand the exception to the target (pending
 * interrupt enqueue + thread interrupt) -- restore from upstream.
 */
static VALUE
rb_threadptr_raise(rb_thread_t *th, int argc, VALUE *argv)
{
    VALUE exc;

    if (rb_threadptr_dead(th)) {
        return Qnil;
    }

    if (argc == 0) {
        exc = rb_exc_new(rb_eRuntimeError, 0, 0);
    }
    else {
        exc = rb_make_exception(argc, argv);
    }

    /* making an exception object can switch thread,
       so we need to check thread deadness again */
    if (rb_threadptr_dead(th)) {
        return Qnil;
    }

    return Qnil;
}
2147 
/* Deliver SignalException(sig) to the VM's main thread.
 * NOTE(review): the declarator (orig. 2149) was dropped by the doc
 * extraction; the body's use of `th` and `sig` matches upstream
 * rb_threadptr_signal_raise(rb_thread_t *th, int sig) -- confirm. */
void
{
    VALUE argv[2];

    argv[0] = rb_eSignal;
    argv[1] = INT2FIX(sig);
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
2157 
/* Deliver SystemExit("exit") to the VM's main thread.
 * NOTE(review): the declarator (orig. 2159) was dropped by the doc
 * extraction; the body's use of `th` matches upstream
 * rb_threadptr_signal_exit(rb_thread_t *th) -- confirm. */
void
{
    VALUE argv[2];

    argv[0] = rb_eSystemExit;
    argv[1] = rb_str_new2("exit");
    rb_threadptr_raise(th->vm->main_thread, 2, argv);
}
2167 
2168 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2169 #define USE_SIGALTSTACK
2170 #endif
2171 
/* Test-and-set the RAISED_EXCEPTION flag: returns 1 if it was already
 * set (re-entrant raise), 0 otherwise.
 * NOTE(review): the declarator (orig. 2173) and the line that sets the
 * flag before returning 0 (orig. 2178) were dropped by the doc
 * extraction; upstream this is rb_threadptr_set_raised(rb_thread_t *)
 * -- confirm. */
int
{
    if (th->ec.raised_flag & RAISED_EXCEPTION) {
        return 1;
    }
    return 0;
}
2181 
/* Test-and-clear the RAISED_EXCEPTION flag: returns 0 if it was not
 * set, 1 after clearing it.
 * NOTE(review): the declarator (orig. 2183) and the line that clears the
 * flag before returning 1 (orig. 2188) were dropped by the doc
 * extraction; upstream this is rb_threadptr_reset_raised(rb_thread_t *)
 * -- confirm. */
int
{
    if (!(th->ec.raised_flag & RAISED_EXCEPTION)) {
        return 0;
    }
    return 1;
}
2191 
/*
 * Scan the VM's waiting_fds list for threads blocked on `fd`; returns
 * nonzero when any were found (the fd is busy).  Each matching entry has
 * its thread pointer cleared so it is notified only once.
 * NOTE(review): the declarator (orig. 2193; the body implies
 * rb_notify_fd_close(int fd)) and lines orig. 2210-2212 -- which,
 * upstream, queue the stream-closed special exception to the waiting
 * thread and interrupt it -- were dropped by the doc extraction;
 * restore from upstream thread.c.
 */
int
{
    rb_vm_t *vm = GET_THREAD()->vm;
    struct waiting_fd *wfd = 0;
    int busy;

    busy = 0;
    list_for_each(&vm->waiting_fds, wfd, wfd_node) {
        if (wfd->fd == fd) {
            rb_thread_t *th = wfd->th;
            VALUE err;

            busy = 1;
            if (!th) {
                continue;
            }
            wfd->th = 0;
        }
    }
    return busy;
}
2217 
/* Spin (yielding the GVL each iteration) until no thread is blocked on
 * `fd` any more.
 * NOTE(review): the declarator (orig. 2219) was dropped by the doc
 * extraction; the body implies rb_thread_fd_close(int fd) -- confirm. */
void
{
    while (rb_notify_fd_close(fd)) rb_thread_schedule();
}
2223 
2224 /*
2225  * call-seq:
2226  * thr.raise
2227  * thr.raise(string)
2228  * thr.raise(exception [, string [, array]])
2229  *
2230  * Raises an exception from the given thread. The caller does not have to be
2231  * +thr+. See Kernel#raise for more information.
2232  *
2233  * Thread.abort_on_exception = true
2234  * a = Thread.new { sleep(200) }
2235  * a.raise("Gotcha")
2236  *
2237  * This will produce:
2238  *
2239  * prog.rb:3: Gotcha (RuntimeError)
2240  * from prog.rb:2:in `initialize'
2241  * from prog.rb:2:in `new'
2242  * from prog.rb:2
2243  */
2244 
2245 static VALUE
2246 thread_raise_m(int argc, VALUE *argv, VALUE self)
2247 {
2248  rb_thread_t *target_th = rb_thread_ptr(self);
2249  const rb_thread_t *current_th = GET_THREAD();
2250 
2251  threadptr_check_pending_interrupt_queue(target_th);
2252  rb_threadptr_raise(target_th, argc, argv);
2253 
2254  /* To perform Thread.current.raise as Kernel.raise */
2255  if (current_th == target_th) {
2256  RUBY_VM_CHECK_INTS(target_th);
2257  }
2258  return Qnil;
2259 }
2260 
2261 
2262 /*
2263  * call-seq:
2264  * thr.exit -> thr or nil
2265  * thr.kill -> thr or nil
2266  * thr.terminate -> thr or nil
2267  *
2268  * Terminates +thr+ and schedules another thread to be run.
2269  *
2270  * If this thread is already marked to be killed, #exit returns the Thread.
2271  *
2272  * If this is the main thread, or the last thread, exits the process.
2273  */
2274 
/*
 * Thread#kill/#exit/#terminate implementation: mark `thread` to die.
 * Killing oneself unwinds immediately via rb_threadptr_to_kill();
 * killing another thread notifies it asynchronously.  Returns the
 * thread (immediately, if it is already dying or dead).
 * NOTE(review): the declarator (orig. 2276; the body implies
 * rb_thread_kill(VALUE thread)), the main-thread branch body
 * (orig. 2284 -- per the doc comment above, killing the main thread
 * exits the process), and the cross-thread notification pair
 * (orig. 2295-2296) were dropped by the doc extraction -- restore from
 * upstream thread.c.
 */
VALUE
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (th->to_kill || th->status == THREAD_KILLED) {
        return thread;
    }
    if (th == th->vm->main_thread) {
    }

    thread_debug("rb_thread_kill: %p (%"PRI_THREAD_ID")\n", (void *)th, thread_id_str(th));

    if (th == GET_THREAD()) {
        /* kill myself immediately */
        rb_threadptr_to_kill(th);
    }
    else {
        threadptr_check_pending_interrupt_queue(th);
    }
    return thread;
}
2300 
/* TRUE iff `thread` is marked to be killed or is already dead.
 * NOTE(review): the declarator (orig. 2302) was dropped by the doc
 * extraction; the body implies rb_thread_to_be_killed(VALUE thread)
 * -- confirm. */
int
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (th->to_kill || th->status == THREAD_KILLED) {
        return TRUE;
    }
    return FALSE;
}
2311 
2312 /*
2313  * call-seq:
2314  * Thread.kill(thread) -> thread
2315  *
2316  * Causes the given +thread+ to exit, see also Thread::exit.
2317  *
2318  * count = 0
2319  * a = Thread.new { loop { count += 1 } }
2320  * sleep(0.1) #=> 0
2321  * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2322  * count #=> 93947
2323  * a.alive? #=> false
2324  */
2325 
2326 static VALUE
2327 rb_thread_s_kill(VALUE obj, VALUE th)
2328 {
2329  return rb_thread_kill(th);
2330 }
2331 
2332 
2333 /*
2334  * call-seq:
2335  * Thread.exit -> thread
2336  *
2337  * Terminates the currently running thread and schedules another thread to be
2338  * run.
2339  *
2340  * If this thread is already marked to be killed, ::exit returns the Thread.
2341  *
2342  * If this is the main thread, or the last thread, exit the process.
2343  */
2344 
2345 static VALUE
2346 rb_thread_exit(void)
2347 {
2348  rb_thread_t *th = GET_THREAD();
2349  return rb_thread_kill(th->self);
2350 }
2351 
2352 
2353 /*
2354  * call-seq:
2355  * thr.wakeup -> thr
2356  *
2357  * Marks a given thread as eligible for scheduling, however it may still
2358  * remain blocked on I/O.
2359  *
2360  * *Note:* This does not invoke the scheduler, see #run for more information.
2361  *
2362  * c = Thread.new { Thread.stop; puts "hey!" }
2363  * sleep 0.1 while c.status!='sleep'
2364  * c.wakeup
2365  * c.join
2366  * #=> "hey!"
2367  */
2368 
/* rb_thread_wakeup(thread) -- Thread#wakeup: mark the thread eligible
 * for scheduling; raises ThreadError when the target is already dead
 * (rb_thread_wakeup_alive returns nil in that case).
 * NOTE(review): the signature line (source line 2370) is missing from
 * this extract. */
2369 VALUE
2371 {
2372  if (!RTEST(rb_thread_wakeup_alive(thread))) {
2373  rb_raise(rb_eThreadError, "killed thread");
2374  }
2375  return thread;
2376 }
2377 
/* rb_thread_wakeup_alive(thread): wake the thread if it is still alive.
 * Returns nil when the target has been killed, otherwise the thread.
 * NOTE(review): the signature line (source line 2379) is missing from
 * this extract. */
2378 VALUE
2380 {
2381  rb_thread_t *target_th = rb_thread_ptr(thread);
2382  if (target_th->status == THREAD_KILLED) return Qnil;
2383 
2384  rb_threadptr_ready(target_th);
2385 
 /* Promote either sleeping state back to RUNNABLE; other states are
  * left untouched. */
2386  if (target_th->status == THREAD_STOPPED ||
2387  target_th->status == THREAD_STOPPED_FOREVER) {
2388  target_th->status = THREAD_RUNNABLE;
2389  }
2390 
2391  return thread;
2392 }
2393 
2394 
2395 /*
2396  * call-seq:
2397  * thr.run -> thr
2398  *
2399  * Wakes up +thr+, making it eligible for scheduling.
2400  *
2401  * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2402  * sleep 0.1 while a.status!='sleep'
2403  * puts "Got here"
2404  * a.run
2405  * a.join
2406  *
2407  * This will produce:
2408  *
2409  * a
2410  * Got here
2411  * c
2412  *
2413  * See also the instance method #wakeup.
2414  */
2415 
/* rb_thread_run(thread) -- Thread#run: wake the thread, then invoke the
 * scheduler (unlike #wakeup, which only marks it runnable).
 * NOTE(review): the signature line (2417) and the scheduler call
 * (source line 2420) are missing from this extract. */
2416 VALUE
2418 {
2419  rb_thread_wakeup(thread);
2421  return thread;
2422 }
2423 
2424 
2425 /*
2426  * call-seq:
2427  * Thread.stop -> nil
2428  *
2429  * Stops execution of the current thread, putting it into a ``sleep'' state,
2430  * and schedules execution of another thread.
2431  *
2432  * a = Thread.new { print "a"; Thread.stop; print "c" }
2433  * sleep 0.1 while a.status!='sleep'
2434  * print "b"
2435  * a.run
2436  * a.join
2437  * #=> "abc"
2438  */
2439 
/* rb_thread_stop(void) -- Thread.stop: put the calling thread to sleep
 * until woken. Refuses to stop when it is the only living thread.
 * NOTE(review): this extract is missing the signature line (2441), the
 * first line of the rb_raise call (2444), and the sleep call (2447);
 * only the raise's message argument is visible below. */
2440 VALUE
2442 {
2443  if (rb_thread_alone()) {
2445  "stopping only thread\n\tnote: use sleep to stop forever");
2446  }
2448  return Qnil;
2449 }
2450 
2451 /********************************************************************/
2452 
2453 /*
2454  * call-seq:
2455  * Thread.list -> array
2456  *
2457  * Returns an array of Thread objects for all threads that are either runnable
2458  * or stopped.
2459  *
2460  * Thread.new { sleep(200) }
2461  * Thread.new { 1000000.times {|i| i*i } }
2462  * Thread.new { Thread.stop }
2463  * Thread.list.each {|t| p t}
2464  *
2465  * This will produce:
2466  *
2467  * #<Thread:0x401b3e84 sleep>
2468  * #<Thread:0x401b3f38 run>
2469  * #<Thread:0x401b3fb0 sleep>
2470  * #<Thread:0x401bdf4c run>
2471  */
2472 
/* rb_thread_list(void) -- Thread.list: array of Thread objects for all
 * living threads that are runnable or stopped.
 * NOTE(review): the signature line (2474) and the
 * THREAD_STOPPED_FOREVER case label (source line 2484) are missing
 * from this extract; line 2485 pushes for all matching case labels. */
2473 VALUE
2475 {
2476  VALUE ary = rb_ary_new();
2477  rb_vm_t *vm = GET_THREAD()->vm;
2478  rb_thread_t *th = 0;
2479 
2480  list_for_each(&vm->living_threads, th, vmlt_node) {
2481  switch (th->status) {
2482  case THREAD_RUNNABLE:
2483  case THREAD_STOPPED:
2485  rb_ary_push(ary, th->self);
2486  default:
2487  break;
2488  }
2489  }
2490  return ary;
2491 }
2492 
/* rb_thread_current(void): the Ruby Thread object of the running thread.
 * NOTE(review): the signature line (source line 2494) is missing from
 * this extract. */
2493 VALUE
2495 {
2496  return GET_THREAD()->self;
2497 }
2498 
2499 /*
2500  * call-seq:
2501  * Thread.current -> thread
2502  *
2503  * Returns the currently executing thread.
2504  *
2505  * Thread.current #=> #<Thread:0x401bdf4c run>
2506  */
2507 
2508 static VALUE
2509 thread_s_current(VALUE klass)
2510 {
2511  return rb_thread_current();
2512 }
2513 
/* rb_thread_main(void): the Ruby Thread object of the VM's main thread.
 * NOTE(review): the signature line (source line 2515) is missing from
 * this extract. */
2514 VALUE
2516 {
2517  return GET_THREAD()->vm->main_thread->self;
2518 }
2519 
2520 /*
2521  * call-seq:
2522  * Thread.main -> thread
2523  *
2524  * Returns the main thread.
2525  */
2526 
2527 static VALUE
2528 rb_thread_s_main(VALUE klass)
2529 {
2530  return rb_thread_main();
2531 }
2532 
2533 
2534 /*
2535  * call-seq:
2536  * Thread.abort_on_exception -> true or false
2537  *
2538  * Returns the status of the global ``abort on exception'' condition.
2539  *
2540  * The default is +false+.
2541  *
2542  * When set to +true+, if any thread is aborted by an exception, the
2543  * raised exception will be re-raised in the main thread.
2544  *
2545  * Can also be specified by the global $DEBUG flag or command line option
2546  * +-d+.
2547  *
2548  * See also ::abort_on_exception=.
2549  *
2550  * There is also an instance level method to set this for a specific thread,
2551  * see #abort_on_exception.
2552  */
2553 
2554 static VALUE
2555 rb_thread_s_abort_exc(void)
2556 {
2557  return GET_THREAD()->vm->thread_abort_on_exception ? Qtrue : Qfalse;
2558 }
2559 
2560 
2561 /*
2562  * call-seq:
2563  * Thread.abort_on_exception= boolean -> true or false
2564  *
2565  * When set to +true+, if any thread is aborted by an exception, the
2566  * raised exception will be re-raised in the main thread.
2567  * Returns the new state.
2568  *
2569  * Thread.abort_on_exception = true
2570  * t1 = Thread.new do
2571  * puts "In new thread"
2572  * raise "Exception from thread"
2573  * end
2574  * sleep(1)
2575  * puts "not reached"
2576  *
2577  * This will produce:
2578  *
2579  * In new thread
2580  * prog.rb:4: Exception from thread (RuntimeError)
2581  * from prog.rb:2:in `initialize'
2582  * from prog.rb:2:in `new'
2583  * from prog.rb:2
2584  *
2585  * See also ::abort_on_exception.
2586  *
2587  * There is also an instance level method to set this for a specific thread,
2588  * see #abort_on_exception=.
2589  */
2590 
2591 static VALUE
2592 rb_thread_s_abort_exc_set(VALUE self, VALUE val)
2593 {
2594  GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
2595  return val;
2596 }
2597 
2598 
2599 /*
2600  * call-seq:
2601  * thr.abort_on_exception -> true or false
2602  *
2603  * Returns the status of the thread-local ``abort on exception'' condition for
2604  * this +thr+.
2605  *
2606  * The default is +false+.
2607  *
2608  * See also #abort_on_exception=.
2609  *
2610  * There is also a class level method to set this for all threads, see
2611  * ::abort_on_exception.
2612  */
2613 
2614 static VALUE
2615 rb_thread_abort_exc(VALUE thread)
2616 {
2617  return rb_thread_ptr(thread)->abort_on_exception ? Qtrue : Qfalse;
2618 }
2619 
2620 
2621 /*
2622  * call-seq:
2623  * thr.abort_on_exception= boolean -> true or false
2624  *
2625  * When set to +true+, if this +thr+ is aborted by an exception, the
2626  * raised exception will be re-raised in the main thread.
2627  *
2628  * See also #abort_on_exception.
2629  *
2630  * There is also a class level method to set this for all threads, see
2631  * ::abort_on_exception=.
2632  */
2633 
2634 static VALUE
2635 rb_thread_abort_exc_set(VALUE thread, VALUE val)
2636 {
2637  rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
2638  return val;
2639 }
2640 
2641 
2642 /*
2643  * call-seq:
2644  * Thread.report_on_exception -> true or false
2645  *
2646  * Returns the status of the global ``report on exception'' condition.
2647  *
2648  * The default is +false+.
2649  *
2650  * When set to +true+, all threads will report the exception if an
2651  * exception is raised in any thread.
2652  *
2653  * See also ::report_on_exception=.
2654  *
2655  * There is also an instance level method to set this for a specific thread,
2656  * see #report_on_exception.
2657  */
2658 
2659 static VALUE
2660 rb_thread_s_report_exc(void)
2661 {
2662  return GET_THREAD()->vm->thread_report_on_exception ? Qtrue : Qfalse;
2663 }
2664 
2665 
2666 /*
2667  * call-seq:
2668  * Thread.report_on_exception= boolean -> true or false
2669  *
2670  * When set to +true+, all threads will report the exception if an
2671  * exception is raised. Returns the new state.
2672  *
2673  * Thread.report_on_exception = true
2674  * t1 = Thread.new do
2675  * puts "In new thread"
2676  * raise "Exception from thread"
2677  * end
2678  * sleep(1)
2679  * puts "In the main thread"
2680  *
2681  * This will produce:
2682  *
2683  * In new thread
2684  * prog.rb:4: Exception from thread (RuntimeError)
2685  * from prog.rb:2:in `initialize'
2686  * from prog.rb:2:in `new'
2687  * from prog.rb:2
2688  * In the main thread
2689  *
2690  * See also ::report_on_exception.
2691  *
2692  * There is also an instance level method to set this for a specific thread,
2693  * see #report_on_exception=.
2694  */
2695 
2696 static VALUE
2697 rb_thread_s_report_exc_set(VALUE self, VALUE val)
2698 {
2699  GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
2700  return val;
2701 }
2702 
2703 
2704 /*
2705  * call-seq:
2706  * thr.report_on_exception -> true or false
2707  *
2708  * Returns the status of the thread-local ``report on exception'' condition for
2709  * this +thr+.
2710  *
2711  * The default is +false+.
2712  *
2713  * See also #report_on_exception=.
2714  *
2715  * There is also a class level method to set this for all threads, see
2716  * ::report_on_exception.
2717  */
2718 
2719 static VALUE
2720 rb_thread_report_exc(VALUE thread)
2721 {
2722  return rb_thread_ptr(thread)->report_on_exception ? Qtrue : Qfalse;
2723 }
2724 
2725 
2726 /*
2727  * call-seq:
2728  * thr.report_on_exception= boolean -> true or false
2729  *
2730  * When set to +true+, all threads (including the main program) will
2731  * report the exception if an exception is raised in this +thr+.
2732  *
2733  * See also #report_on_exception.
2734  *
2735  * There is also a class level method to set this for all threads, see
2736  * ::report_on_exception=.
2737  */
2738 
2739 static VALUE
2740 rb_thread_report_exc_set(VALUE thread, VALUE val)
2741 {
2742  rb_thread_ptr(thread)->report_on_exception = RTEST(val);
2743  return val;
2744 }
2745 
2746 
2747 /*
2748  * call-seq:
2749  * thr.group -> thgrp or nil
2750  *
2751  * Returns the ThreadGroup which contains the given thread, or returns +nil+
2752  * if +thr+ is not a member of any group.
2753  *
2754  * Thread.main.group #=> #<ThreadGroup:0x4029d914>
2755  */
2756 
/* rb_thread_group(thread) -- Thread#group: the ThreadGroup containing
 * the thread, or nil when the thread belongs to none (thgroup == 0).
 * NOTE(review): the signature line (source line 2758) is missing from
 * this extract. */
2757 VALUE
2759 {
2760  VALUE group = rb_thread_ptr(thread)->thgroup;
2761  return group == 0 ? Qnil : group;
2762 }
2763 
/* Map a thread's status enum to its display string. With +detail+,
 * sleeping-forever threads report "sleep_forever" instead of "sleep".
 * NOTE(review): the THREAD_STOPPED_FOREVER case label (source line
 * 2770) is missing from this extract; line 2771 belongs to that case
 * and falls through to THREAD_STOPPED when !detail. */
2764 static const char *
2765 thread_status_name(rb_thread_t *th, int detail)
2766 {
2767  switch (th->status) {
2768  case THREAD_RUNNABLE:
2769  return th->to_kill ? "aborting" : "run";
2771  if (detail) return "sleep_forever";
2772  case THREAD_STOPPED:
2773  return "sleep";
2774  case THREAD_KILLED:
2775  return "dead";
2776  default:
2777  return "unknown";
2778  }
2779 }
2780 
2781 static int
2782 rb_threadptr_dead(rb_thread_t *th)
2783 {
2784  return th->status == THREAD_KILLED;
2785 }
2786 
2787 
2788 /*
2789  * call-seq:
2790  * thr.status -> string, false or nil
2791  *
2792  * Returns the status of +thr+.
2793  *
2794  * [<tt>"sleep"</tt>]
2795  * Returned if this thread is sleeping or waiting on I/O
2796  * [<tt>"run"</tt>]
2797  * When this thread is executing
2798  * [<tt>"aborting"</tt>]
2799  * If this thread is aborting
2800  * [+false+]
2801  * When this thread is terminated normally
2802  * [+nil+]
2803  * If terminated with an exception.
2804  *
2805  * a = Thread.new { raise("die now") }
2806  * b = Thread.new { Thread.stop }
2807  * c = Thread.new { Thread.exit }
2808  * d = Thread.new { sleep }
2809  * d.kill #=> #<Thread:0x401b3678 aborting>
2810  * a.status #=> nil
2811  * b.status #=> "sleep"
2812  * c.status #=> false
2813  * d.status #=> "aborting"
2814  * Thread.current.status #=> "run"
2815  *
2816  * See also the instance methods #alive? and #stop?
2817  */
2818 
2819 static VALUE
2820 rb_thread_status(VALUE thread)
2821 {
2822  rb_thread_t *target_th = rb_thread_ptr(thread);
2823 
2824  if (rb_threadptr_dead(target_th)) {
2825  if (!NIL_P(target_th->ec.errinfo) &&
2826  !FIXNUM_P(target_th->ec.errinfo)) {
2827  return Qnil;
2828  }
2829  else {
2830  return Qfalse;
2831  }
2832  }
2833  else {
2834  return rb_str_new2(thread_status_name(target_th, FALSE));
2835  }
2836 }
2837 
2838 
2839 /*
2840  * call-seq:
2841  * thr.alive? -> true or false
2842  *
2843  * Returns +true+ if +thr+ is running or sleeping.
2844  *
2845  * thr = Thread.new { }
2846  * thr.join #=> #<Thread:0x401b3fb0 dead>
2847  * Thread.current.alive? #=> true
2848  * thr.alive? #=> false
2849  *
2850  * See also #stop? and #status.
2851  */
2852 
2853 static VALUE
2854 rb_thread_alive_p(VALUE thread)
2855 {
2856  if (rb_threadptr_dead(rb_thread_ptr(thread))) {
2857  return Qfalse;
2858  }
2859  else {
2860  return Qtrue;
2861  }
2862 }
2863 
2864 /*
2865  * call-seq:
2866  * thr.stop? -> true or false
2867  *
2868  * Returns +true+ if +thr+ is dead or sleeping.
2869  *
2870  * a = Thread.new { Thread.stop }
2871  * b = Thread.current
2872  * a.stop? #=> true
2873  * b.stop? #=> false
2874  *
2875  * See also #alive? and #status.
2876  */
2877 
2878 static VALUE
2879 rb_thread_stop_p(VALUE thread)
2880 {
2881  rb_thread_t *th = rb_thread_ptr(thread);
2882 
2883  if (rb_threadptr_dead(th)) {
2884  return Qtrue;
2885  }
2886  else if (th->status == THREAD_STOPPED ||
2887  th->status == THREAD_STOPPED_FOREVER) {
2888  return Qtrue;
2889  }
2890  else {
2891  return Qfalse;
2892  }
2893 }
2894 
2895 /*
2896  * call-seq:
2897  * thr.safe_level -> integer
2898  *
2899  * Returns the safe level in effect for <i>thr</i>. Setting thread-local safe
2900  * levels can help when implementing sandboxes which run insecure code.
2901  *
2902  * thr = Thread.new { $SAFE = 1; sleep }
2903  * Thread.current.safe_level #=> 0
2904  * thr.safe_level #=> 1
2905  */
2906 
2907 static VALUE
2908 rb_thread_safe_level(VALUE thread)
2909 {
2910  return INT2NUM(rb_thread_ptr(thread)->ec.safe_level);
2911 }
2912 
2913 /*
2914  * call-seq:
2915  * thr.name -> string
2916  *
2917  * show the name of the thread.
2918  */
2919 
2920 static VALUE
2921 rb_thread_getname(VALUE thread)
2922 {
2923  return rb_thread_ptr(thread)->name;
2924 }
2925 
2926 /*
2927  * call-seq:
2928  * thr.name=(name) -> string
2929  *
2930  * set given name to the ruby thread.
2931  * On some platform, it may set the name to pthread and/or kernel.
2932  */
2933 
2934 static VALUE
2935 rb_thread_setname(VALUE thread, VALUE name)
2936 {
2937  rb_thread_t *target_th = rb_thread_ptr(thread);
2938 
2939  if (!NIL_P(name)) {
2940  rb_encoding *enc;
2941  StringValueCStr(name);
2942  enc = rb_enc_get(name);
2943  if (!rb_enc_asciicompat(enc)) {
2944  rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
2945  rb_enc_name(enc));
2946  }
2947  name = rb_str_new_frozen(name);
2948  }
2949  target_th->name = name;
2950  if (threadptr_initialized(target_th)) {
2951  native_set_another_thread_name(target_th->thread_id, name);
2952  }
2953  return name;
2954 }
2955 
2956 /*
2957  * call-seq:
2958  * thr.to_s -> string
2959  *
2960  * Dump the name, id, and status of _thr_ to a string.
2961  */
2962 
2963 static VALUE
2964 rb_thread_to_s(VALUE thread)
2965 {
2966  VALUE cname = rb_class_path(rb_obj_class(thread));
2967  rb_thread_t *target_th = rb_thread_ptr(thread);
2968  const char *status;
2969  VALUE str;
2970 
2971  status = thread_status_name(target_th, TRUE);
2972  str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
2973  if (!NIL_P(target_th->name)) {
2974  rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
2975  }
2976  if (!target_th->first_func && target_th->first_proc) {
2977  VALUE loc = rb_proc_location(target_th->first_proc);
2978  if (!NIL_P(loc)) {
2979  const VALUE *ptr = RARRAY_CONST_PTR(loc);
2980  rb_str_catf(str, "@%"PRIsVALUE":%"PRIsVALUE, ptr[0], ptr[1]);
2981  rb_gc_force_recycle(loc);
2982  }
2983  }
2984  rb_str_catf(str, " %s>", status);
2985  OBJ_INFECT(str, thread);
2986 
2987  return str;
2988 }
2989 
2990 /* variables for recursive traversals */
2991 static ID recursive_key;
2992 
2993 static VALUE
2994 threadptr_local_aref(rb_thread_t *th, ID id)
2995 {
2996  if (id == recursive_key) {
2997  return th->ec.local_storage_recursive_hash;
2998  }
2999  else {
3000  st_data_t val;
3001  st_table *local_storage = th->ec.local_storage;
3002 
3003  if (local_storage != NULL && st_lookup(local_storage, id, &val)) {
3004  return (VALUE)val;
3005  }
3006  else {
3007  return Qnil;
3008  }
3009  }
3010 }
3011 
/* rb_thread_local_aref(thread, id): public fiber-local getter; resolves
 * the thread pointer and delegates to threadptr_local_aref().
 * NOTE(review): the signature line (source line 3013) is missing from
 * this extract. */
3012 VALUE
3014 {
3015  return threadptr_local_aref(rb_thread_ptr(thread), id);
3016 }
3017 
3018 /*
3019  * call-seq:
3020  * thr[sym] -> obj or nil
3021  *
3022  * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3023  * if not explicitly inside a Fiber), using either a symbol or a string name.
3024  * If the specified variable does not exist, returns +nil+.
3025  *
3026  * [
3027  * Thread.new { Thread.current["name"] = "A" },
3028  * Thread.new { Thread.current[:name] = "B" },
3029  * Thread.new { Thread.current["name"] = "C" }
3030  * ].each do |th|
3031  * th.join
3032  * puts "#{th.inspect}: #{th[:name]}"
3033  * end
3034  *
3035  * This will produce:
3036  *
3037  * #<Thread:0x00000002a54220 dead>: A
3038  * #<Thread:0x00000002a541a8 dead>: B
3039  * #<Thread:0x00000002a54130 dead>: C
3040  *
3041  * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3042  * This confusion did not exist in Ruby 1.8 because
3043  * fibers are only available since Ruby 1.9.
3044  * Ruby 1.9 chooses that the methods behaves fiber-local to save
3045  * following idiom for dynamic scope.
3046  *
3047  * def meth(newvalue)
3048  * begin
3049  * oldvalue = Thread.current[:name]
3050  * Thread.current[:name] = newvalue
3051  * yield
3052  * ensure
3053  * Thread.current[:name] = oldvalue
3054  * end
3055  * end
3056  *
3057  * The idiom may not work as dynamic scope if the methods are thread-local
3058  * and a given block switches fiber.
3059  *
3060  * f = Fiber.new {
3061  * meth(1) {
3062  * Fiber.yield
3063  * }
3064  * }
3065  * meth(2) {
3066  * f.resume
3067  * }
3068  * f.resume
3069  * p Thread.current[:name]
3070  * #=> nil if fiber-local
3071  * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3072  *
3073  * For thread-local variables, please see #thread_variable_get and
3074  * #thread_variable_set.
3075  *
3076  */
3077 
3078 static VALUE
3079 rb_thread_aref(VALUE thread, VALUE key)
3080 {
3081  ID id = rb_check_id(&key);
3082  if (!id) return Qnil;
3083  return rb_thread_local_aref(thread, id);
3084 }
3085 
3086 static VALUE
3087 rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3088 {
3089  VALUE key, val;
3090  ID id;
3091  rb_thread_t *target_th = rb_thread_ptr(self);
3092  int block_given;
3093 
3094  rb_check_arity(argc, 1, 2);
3095  key = argv[0];
3096 
3097  block_given = rb_block_given_p();
3098  if (block_given && argc == 2) {
3099  rb_warn("block supersedes default value argument");
3100  }
3101 
3102  id = rb_check_id(&key);
3103 
3104  if (id == recursive_key) {
3105  return target_th->ec.local_storage_recursive_hash;
3106  }
3107  else if (id && target_th->ec.local_storage &&
3108  st_lookup(target_th->ec.local_storage, id, &val)) {
3109  return val;
3110  }
3111  else if (block_given) {
3112  return rb_yield(key);
3113  }
3114  else if (argc == 1) {
3115  rb_raise(rb_eKeyError, "key not found: %"PRIsVALUE, key);
3116  }
3117  else {
3118  return argv[1];
3119  }
3120 }
3121 
/* Store (or delete, when val is nil) a fiber-local on *th.
 * NOTE(review): the assignment to the recursion-guard hash (source
 * line 3126, inside the recursive_key branch) is missing from this
 * extract. */
3122 static VALUE
3123 threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3124 {
3125  if (id == recursive_key) {
3127  return val;
3128  }
3129  else {
3130  st_table *local_storage = th->ec.local_storage;
3131 
 /* nil deletes the entry; the table itself is created lazily on the
  * first real store. */
3132  if (NIL_P(val)) {
3133  if (!local_storage) return Qnil;
3134  st_delete_wrap(local_storage, id);
3135  return Qnil;
3136  }
3137  else {
3138  if (local_storage == NULL) {
3139  th->ec.local_storage = local_storage = st_init_numtable();
3140  }
3141  st_insert(local_storage, id, val);
3142  return val;
3143  }
3144  }
3145 }
3146 
/* rb_thread_local_aset(thread, id, val): public fiber-local setter;
 * rejects frozen threads, then delegates to threadptr_local_aset().
 * NOTE(review): the signature line (source line 3148) is missing from
 * this extract. */
3147 VALUE
3149 {
3150  if (OBJ_FROZEN(thread)) {
3151  rb_error_frozen("thread locals");
3152  }
3153 
3154  return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3155 }
3156 
3157 /*
3158  * call-seq:
3159  * thr[sym] = obj -> obj
3160  *
3161  * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3162  * using either a symbol or a string.
3163  *
3164  * See also Thread#[].
3165  *
3166  * For thread-local variables, please see #thread_variable_set and
3167  * #thread_variable_get.
3168  */
3169 
3170 static VALUE
3171 rb_thread_aset(VALUE self, VALUE id, VALUE val)
3172 {
3173  return rb_thread_local_aset(self, rb_to_id(id), val);
3174 }
3175 
3176 /*
3177  * call-seq:
3178  * thr.thread_variable_get(key) -> obj or nil
3179  *
3180  * Returns the value of a thread local variable that has been set. Note that
3181  * these are different than fiber local values. For fiber local values,
3182  * please see Thread#[] and Thread#[]=.
3183  *
3184  * Thread local values are carried along with threads, and do not respect
3185  * fibers. For example:
3186  *
3187  * Thread.new {
3188  * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3189  * Thread.current["foo"] = "bar" # set a fiber local
3190  *
3191  * Fiber.new {
3192  * Fiber.yield [
3193  * Thread.current.thread_variable_get("foo"), # get the thread local
3194  * Thread.current["foo"], # get the fiber local
3195  * ]
3196  * }.resume
3197  * }.join.value # => ['bar', nil]
3198  *
3199  * The value "bar" is returned for the thread local, where nil is returned
3200  * for the fiber local. The fiber is executed in the same thread, so the
3201  * thread local values are available.
3202  */
3203 
3204 static VALUE
3205 rb_thread_variable_get(VALUE thread, VALUE key)
3206 {
3207  VALUE locals;
3208 
3209  locals = rb_ivar_get(thread, id_locals);
3210  return rb_hash_aref(locals, rb_to_symbol(key));
3211 }
3212 
3213 /*
3214  * call-seq:
3215  * thr.thread_variable_set(key, value)
3216  *
3217  * Sets a thread local with +key+ to +value+. Note that these are local to
3218  * threads, and not to fibers. Please see Thread#thread_variable_get and
3219  * Thread#[] for more information.
3220  */
3221 
3222 static VALUE
3223 rb_thread_variable_set(VALUE thread, VALUE id, VALUE val)
3224 {
3225  VALUE locals;
3226 
3227  if (OBJ_FROZEN(thread)) {
3228  rb_error_frozen("thread locals");
3229  }
3230 
3231  locals = rb_ivar_get(thread, id_locals);
3232  return rb_hash_aset(locals, rb_to_symbol(id), val);
3233 }
3234 
3235 /*
3236  * call-seq:
3237  * thr.key?(sym) -> true or false
3238  *
3239  * Returns +true+ if the given string (or symbol) exists as a fiber-local
3240  * variable.
3241  *
3242  * me = Thread.current
3243  * me[:oliver] = "a"
3244  * me.key?(:oliver) #=> true
3245  * me.key?(:stanley) #=> false
3246  */
3247 
3248 static VALUE
3249 rb_thread_key_p(VALUE self, VALUE key)
3250 {
3251  ID id = rb_check_id(&key);
3252  st_table *local_storage = rb_thread_ptr(self)->ec.local_storage;
3253 
3254  if (!id || local_storage == NULL) {
3255  return Qfalse;
3256  }
3257  else if (st_lookup(local_storage, id, 0)) {
3258  return Qtrue;
3259  }
3260  else {
3261  return Qfalse;
3262  }
3263 }
3264 
3265 static int
3266 thread_keys_i(ID key, VALUE value, VALUE ary)
3267 {
3268  rb_ary_push(ary, ID2SYM(key));
3269  return ST_CONTINUE;
3270 }
3271 
/* rb_thread_alone(void): nonzero when the VM has exactly one living
 * thread. NOTE(review): the signature line (source line 3273) is
 * missing from this extract. */
3272 int
3274 {
3275  return vm_living_thread_num(GET_VM()) == 1;
3276 }
3277 
3278 /*
3279  * call-seq:
3280  * thr.keys -> array
3281  *
3282  * Returns an array of the names of the fiber-local variables (as Symbols).
3283  *
3284  * thr = Thread.new do
3285  * Thread.current[:cat] = 'meow'
3286  * Thread.current["dog"] = 'woof'
3287  * end
3288  * thr.join #=> #<Thread:0x401b3f10 dead>
3289  * thr.keys #=> [:dog, :cat]
3290  */
3291 
3292 static VALUE
3293 rb_thread_keys(VALUE self)
3294 {
3295  st_table *local_storage = rb_thread_ptr(self)->ec.local_storage;
3296  VALUE ary = rb_ary_new();
3297 
3298  if (local_storage) {
3299  st_foreach(local_storage, thread_keys_i, ary);
3300  }
3301  return ary;
3302 }
3303 
3304 static int
3305 keys_i(VALUE key, VALUE value, VALUE ary)
3306 {
3307  rb_ary_push(ary, key);
3308  return ST_CONTINUE;
3309 }
3310 
3311 /*
3312  * call-seq:
3313  * thr.thread_variables -> array
3314  *
3315  * Returns an array of the names of the thread-local variables (as Symbols).
3316  *
3317  * thr = Thread.new do
3318  * Thread.current.thread_variable_set(:cat, 'meow')
3319  * Thread.current.thread_variable_set("dog", 'woof')
3320  * end
3321  * thr.join #=> #<Thread:0x401b3f10 dead>
3322  * thr.thread_variables #=> [:dog, :cat]
3323  *
3324  * Note that these are not fiber local variables. Please see Thread#[] and
3325  * Thread#thread_variable_get for more details.
3326  */
3327 
3328 static VALUE
3329 rb_thread_variables(VALUE thread)
3330 {
3331  VALUE locals;
3332  VALUE ary;
3333 
3334  locals = rb_ivar_get(thread, id_locals);
3335  ary = rb_ary_new();
3336  rb_hash_foreach(locals, keys_i, ary);
3337 
3338  return ary;
3339 }
3340 
3341 /*
3342  * call-seq:
3343  * thr.thread_variable?(key) -> true or false
3344  *
3345  * Returns +true+ if the given string (or symbol) exists as a thread-local
3346  * variable.
3347  *
3348  * me = Thread.current
3349  * me.thread_variable_set(:oliver, "a")
3350  * me.thread_variable?(:oliver) #=> true
3351  * me.thread_variable?(:stanley) #=> false
3352  *
3353  * Note that these are not fiber local variables. Please see Thread#[] and
3354  * Thread#thread_variable_get for more details.
3355  */
3356 
3357 static VALUE
3358 rb_thread_variable_p(VALUE thread, VALUE key)
3359 {
3360  VALUE locals;
3361  ID id = rb_check_id(&key);
3362 
3363  if (!id) return Qfalse;
3364 
3365  locals = rb_ivar_get(thread, id_locals);
3366 
3367  if (!RHASH(locals)->ntbl)
3368  return Qfalse;
3369 
3370  if (st_lookup(RHASH(locals)->ntbl, ID2SYM(id), 0)) {
3371  return Qtrue;
3372  }
3373 
3374  return Qfalse;
3375 }
3376 
3377 /*
3378  * call-seq:
3379  * thr.priority -> integer
3380  *
3381  * Returns the priority of <i>thr</i>. Default is inherited from the
3382  * current thread which creating the new thread, or zero for the
3383  * initial main thread; higher-priority thread will run more frequently
3384  * than lower-priority threads (but lower-priority threads can also run).
3385  *
3386  * This is just hint for Ruby thread scheduler. It may be ignored on some
3387  * platform.
3388  *
3389  * Thread.current.priority #=> 0
3390  */
3391 
3392 static VALUE
3393 rb_thread_priority(VALUE thread)
3394 {
3395  return INT2NUM(rb_thread_ptr(thread)->priority);
3396 }
3397 
3398 
3399 /*
3400  * call-seq:
3401  * thr.priority= integer -> thr
3402  *
3403  * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3404  * will run more frequently than lower-priority threads (but lower-priority
3405  * threads can also run).
3406  *
3407  * This is just hint for Ruby thread scheduler. It may be ignored on some
3408  * platform.
3409  *
3410  * count1 = count2 = 0
3411  * a = Thread.new do
3412  * loop { count1 += 1 }
3413  * end
3414  * a.priority = -1
3415  *
3416  * b = Thread.new do
3417  * loop { count2 += 1 }
3418  * end
3419  * b.priority = -2
3420  * sleep 1 #=> 1
3421  * count1 #=> 622504
3422  * count2 #=> 5832
3423  */
3424 
3425 static VALUE
3426 rb_thread_priority_set(VALUE thread, VALUE prio)
3427 {
3428  rb_thread_t *target_th = rb_thread_ptr(thread);
3429  int priority;
3430 
3431 #if USE_NATIVE_THREAD_PRIORITY
3432  target_th->priority = NUM2INT(prio);
3433  native_thread_apply_priority(th);
3434 #else
3435  priority = NUM2INT(prio);
3436  if (priority > RUBY_THREAD_PRIORITY_MAX) {
3437  priority = RUBY_THREAD_PRIORITY_MAX;
3438  }
3439  else if (priority < RUBY_THREAD_PRIORITY_MIN) {
3440  priority = RUBY_THREAD_PRIORITY_MIN;
3441  }
3442  target_th->priority = priority;
3443 #endif
3444  return INT2NUM(target_th->priority);
3445 }
3446 
3447 /* for IO */
3448 
3449 #if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
3450 
3451 /*
3452  * several Unix platforms support file descriptors bigger than FD_SETSIZE
3453  * in select(2) system call.
3454  *
3455  * - Linux 2.2.12 (?)
3456  * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
3457  * select(2) documents how to allocate fd_set dynamically.
3458  * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
3459  * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
3460  * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
3461  * select(2) documents how to allocate fd_set dynamically.
3462  * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
3463  * - HP-UX documents how to allocate fd_set dynamically.
3464  * http://docs.hp.com/en/B2355-60105/select.2.html
3465  * - Solaris 8 has select_large_fdset
3466  * - Mac OS X 10.7 (Lion)
3467  * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
3468  * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
3469  * http://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/_index.html
3470  *
3471  * When fd_set is not big enough to hold big file descriptors,
3472  * it should be allocated dynamically.
3473  * Note that this assumes fd_set is structured as bitmap.
3474  *
3475  * rb_fd_init allocates the memory.
3476  * rb_fd_term free the memory.
3477  * rb_fd_set may re-allocates bitmap.
3478  *
3479  * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
3480  */
3481 
3482 void
3483 rb_fd_init(rb_fdset_t *fds)
3484 {
3485  fds->maxfd = 0;
3486  fds->fdset = ALLOC(fd_set);
3487  FD_ZERO(fds->fdset);
3488 }
3489 
/* Initialize *dst as a copy of *src (fresh xmalloc'd bitmap, no prior
 * dst state assumed). Copies at least sizeof(fd_set) bytes.
 * NOTE(review): the signature line (source line 3491) is missing from
 * this extract; by analogy with rb_fd_dup below, the parameters are
 * (rb_fdset_t *dst, rb_fdset_t *src) -- confirm against upstream. */
3490 void
3492 {
3493  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3494 
3495  if (size < sizeof(fd_set))
3496  size = sizeof(fd_set);
3497  dst->maxfd = src->maxfd;
3498  dst->fdset = xmalloc(size);
3499  memcpy(dst->fdset, src->fdset, size);
3500 }
3501 
3502 void
3503 rb_fd_term(rb_fdset_t *fds)
3504 {
3505  if (fds->fdset) xfree(fds->fdset);
3506  fds->maxfd = 0;
3507  fds->fdset = 0;
3508 }
3509 
3510 void
3511 rb_fd_zero(rb_fdset_t *fds)
3512 {
3513  if (fds->fdset)
3514  MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
3515 }
3516 
3517 static void
3518 rb_fd_resize(int n, rb_fdset_t *fds)
3519 {
3520  size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
3521  size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
3522 
3523  if (m < sizeof(fd_set)) m = sizeof(fd_set);
3524  if (o < sizeof(fd_set)) o = sizeof(fd_set);
3525 
3526  if (m > o) {
3527  fds->fdset = xrealloc(fds->fdset, m);
3528  memset((char *)fds->fdset + o, 0, m - o);
3529  }
3530  if (n >= fds->maxfd) fds->maxfd = n + 1;
3531 }
3532 
3533 void
3534 rb_fd_set(int n, rb_fdset_t *fds)
3535 {
3536  rb_fd_resize(n, fds);
3537  FD_SET(n, fds->fdset);
3538 }
3539 
3540 void
3541 rb_fd_clr(int n, rb_fdset_t *fds)
3542 {
3543  if (n >= fds->maxfd) return;
3544  FD_CLR(n, fds->fdset);
3545 }
3546 
3547 int
3548 rb_fd_isset(int n, const rb_fdset_t *fds)
3549 {
3550  if (n >= fds->maxfd) return 0;
3551  return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
3552 }
3553 
3554 void
3555 rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
3556 {
3557  size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
3558 
3559  if (size < sizeof(fd_set)) size = sizeof(fd_set);
3560  dst->maxfd = max;
3561  dst->fdset = xrealloc(dst->fdset, size);
3562  memcpy(dst->fdset, src, size);
3563 }
3564 
3565 void
3566 rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
3567 {
3568  size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
3569 
3570  if (size < sizeof(fd_set))
3571  size = sizeof(fd_set);
3572  dst->maxfd = src->maxfd;
3573  dst->fdset = xrealloc(dst->fdset, size);
3574  memcpy(dst->fdset, src->fdset, size);
3575 }
3576 
3577 #ifdef __native_client__
3578 int select(int nfds, fd_set *readfds, fd_set *writefds,
3579  fd_set *exceptfds, struct timeval *timeout);
3580 #endif
3581 
3582 int
3583 rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
3584 {
3585  fd_set *r = NULL, *w = NULL, *e = NULL;
3586  if (readfds) {
3587  rb_fd_resize(n - 1, readfds);
3588  r = rb_fd_ptr(readfds);
3589  }
3590  if (writefds) {
3591  rb_fd_resize(n - 1, writefds);
3592  w = rb_fd_ptr(writefds);
3593  }
3594  if (exceptfds) {
3595  rb_fd_resize(n - 1, exceptfds);
3596  e = rb_fd_ptr(exceptfds);
3597  }
3598  return select(n, r, w, e, timeout);
3599 }
3600 
3601 #define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
3602 
3603 #undef FD_ZERO
3604 #undef FD_SET
3605 #undef FD_CLR
3606 #undef FD_ISSET
3607 
3608 #define FD_ZERO(f) rb_fd_zero(f)
3609 #define FD_SET(i, f) rb_fd_set((i), (f))
3610 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3611 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3612 
3613 #elif defined(_WIN32)
3614 
/* Windows variant: allocate a heap fd_set with the default FD_SETSIZE
 * capacity and clear it. rb_fd_set() below grows it on demand. */
void
rb_fd_init(rb_fdset_t *set)
{
    set->capa = FD_SETSIZE;
    set->fdset = ALLOC(fd_set);
    FD_ZERO(set->fdset);
}
3622 
3623 void
3625 {
3626  rb_fd_init(dst);
3627  rb_fd_dup(dst, src);
3628 }
3629 
3630 void
3631 rb_fd_term(rb_fdset_t *set)
3632 {
3633  xfree(set->fdset);
3634  set->fdset = NULL;
3635  set->capa = 0;
3636 }
3637 
/* Windows variant: add the SOCKET behind fd to the set, growing fd_array
 * in FD_SETSIZE-sized steps. Relies on the Winsock fd_set layout
 * (fd_count followed by fd_array), hence the hand-computed realloc size. */
void
rb_fd_set(int fd, rb_fdset_t *set)
{
    unsigned int i;
    SOCKET s = rb_w32_get_osfhandle(fd);

    /* already present? Winsock sets are arrays, so scan for duplicates */
    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {
            return;
        }
    }
    if (set->fdset->fd_count >= (unsigned)set->capa) {
        /* round capacity up to the next multiple of FD_SETSIZE */
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
        set->fdset = xrealloc(set->fdset, sizeof(unsigned int) + sizeof(SOCKET) * set->capa);
    }
    set->fdset->fd_array[set->fdset->fd_count++] = s;
}
3655 
3656 #undef FD_ZERO
3657 #undef FD_SET
3658 #undef FD_CLR
3659 #undef FD_ISSET
3660 
3661 #define FD_ZERO(f) rb_fd_zero(f)
3662 #define FD_SET(i, f) rb_fd_set((i), (f))
3663 #define FD_CLR(i, f) rb_fd_clr((i), (f))
3664 #define FD_ISSET(i, f) rb_fd_isset((i), (f))
3665 
3666 #define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
3667 
3668 #endif
3669 
3670 #ifndef rb_fd_no_init
3671 #define rb_fd_no_init(fds) (void)(fds)
3672 #endif
3673 
3674 static inline int
3675 retryable(int e)
3676 {
3677  if (e == EINTR) return TRUE;
3678 #ifdef ERESTART
3679  if (e == ERESTART) return TRUE;
3680 #endif
3681  return FALSE;
3682 }
3683 
3684 #define restore_fdset(fds1, fds2) \
3685  ((fds1) ? rb_fd_dup(fds1, fds2) : (void)0)
3686 
/* Recompute `timeout` as the time remaining until the absolute deadline
 * `limit` (seconds, from timeofday()). Clamps negative remainders to zero
 * so a retried select() does not get a negative timeout. No-op if NULL. */
static inline void
update_timeval(struct timeval *timeout, double limit)
{
    double remaining;

    if (!timeout) return;

    remaining = limit - timeofday();
    timeout->tv_sec = (time_t)remaining;
    timeout->tv_usec = (int)((remaining - (double)timeout->tv_sec) * 1e6);
    if (timeout->tv_sec < 0) timeout->tv_sec = 0;
    if (timeout->tv_usec < 0) timeout->tv_usec = 0;
}
3699 
3700 static int
3701 do_select(int n, rb_fdset_t *const readfds, rb_fdset_t *const writefds,
3702  rb_fdset_t *const exceptfds, struct timeval *timeout)
3703 {
3704  int MAYBE_UNUSED(result);
3705  int lerrno;
3706  rb_fdset_t MAYBE_UNUSED(orig_read);
3707  rb_fdset_t MAYBE_UNUSED(orig_write);
3708  rb_fdset_t MAYBE_UNUSED(orig_except);
3709  double limit = 0;
3710  struct timeval wait_rest;
3711  rb_thread_t *th = GET_THREAD();
3712 
3713 #define do_select_update() \
3714  (restore_fdset(readfds, &orig_read), \
3715  restore_fdset(writefds, &orig_write), \
3716  restore_fdset(exceptfds, &orig_except), \
3717  update_timeval(timeout, limit), \
3718  TRUE)
3719 
3720  if (timeout) {
3721  limit = timeofday();
3722  limit += (double)timeout->tv_sec+(double)timeout->tv_usec*1e-6;
3723  wait_rest = *timeout;
3724  timeout = &wait_rest;
3725  }
3726 
3727 #define fd_init_copy(f) \
3728  (f##fds) ? rb_fd_init_copy(&orig_##f, f##fds) : rb_fd_no_init(&orig_##f)
3729  fd_init_copy(read);
3730  fd_init_copy(write);
3731  fd_init_copy(except);
3732 #undef fd_init_copy
3733 
3734  do {
3735  lerrno = 0;
3736 
3737  BLOCKING_REGION({
3738  result = native_fd_select(n, readfds, writefds, exceptfds,
3739  timeout, th);
3740  if (result < 0) lerrno = errno;
3741  }, ubf_select, th, FALSE);
3742 
3744  } while (result < 0 && retryable(errno = lerrno) && do_select_update());
3745 
3746 #define fd_term(f) if (f##fds) rb_fd_term(&orig_##f)
3747  fd_term(read);
3748  fd_term(write);
3749  fd_term(except);
3750 #undef fd_term
3751 
3752  return result;
3753 }
3754 
3755 static void
3756 rb_thread_wait_fd_rw(int fd, int read)
3757 {
3758  int result = 0;
3759  int events = read ? RB_WAITFD_IN : RB_WAITFD_OUT;
3760 
3761  thread_debug("rb_thread_wait_fd_rw(%d, %s)\n", fd, read ? "read" : "write");
3762 
3763  if (fd < 0) {
3764  rb_raise(rb_eIOError, "closed stream");
3765  }
3766 
3767  result = rb_wait_for_single_fd(fd, events, NULL);
3768  if (result < 0) {
3769  rb_sys_fail(0);
3770  }
3771 
3772  thread_debug("rb_thread_wait_fd_rw(%d, %s): done\n", fd, read ? "read" : "write");
3773 }
3774 
3775 void
3777 {
3778  rb_thread_wait_fd_rw(fd, 1);
3779 }
3780 
3781 int
3783 {
3784  rb_thread_wait_fd_rw(fd, 0);
3785  return TRUE;
3786 }
3787 
3788 int
3789 rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
3790  struct timeval *timeout)
3791 {
3792  if (!read && !write && !except) {
3793  if (!timeout) {
3795  return 0;
3796  }
3797  rb_thread_wait_for(*timeout);
3798  return 0;
3799  }
3800 
3801  if (read) {
3802  rb_fd_resize(max - 1, read);
3803  }
3804  if (write) {
3805  rb_fd_resize(max - 1, write);
3806  }
3807  if (except) {
3808  rb_fd_resize(max - 1, except);
3809  }
3810  return do_select(max, read, write, except, timeout);
3811 }
3812 
3813 /*
3814  * poll() is supported by many OSes, but so far Linux is the only
3815  * one we know of that supports using poll() in all places select()
3816  * would work.
3817  */
3818 #if defined(HAVE_POLL) && defined(__linux__)
3819 # define USE_POLL
3820 #endif
3821 
3822 #ifdef USE_POLL
3823 
3824 /* The same with linux kernel. TODO: make platform independent definition. */
3825 #define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
3826 #define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
3827 #define POLLEX_SET (POLLPRI)
3828 
3829 #ifndef HAVE_PPOLL
3830 /* TODO: don't ignore sigmask */
3831 int
3832 ppoll(struct pollfd *fds, nfds_t nfds,
3833  const struct timespec *ts, const sigset_t *sigmask)
3834 {
3835  int timeout_ms;
3836 
3837  if (ts) {
3838  int tmp, tmp2;
3839 
3840  if (ts->tv_sec > INT_MAX/1000)
3841  timeout_ms = -1;
3842  else {
3843  tmp = (int)(ts->tv_sec * 1000);
3844  tmp2 = (int)(ts->tv_nsec / (1000 * 1000));
3845  if (INT_MAX - tmp < tmp2)
3846  timeout_ms = -1;
3847  else
3848  timeout_ms = (int)(tmp + tmp2);
3849  }
3850  }
3851  else
3852  timeout_ms = -1;
3853 
3854  return poll(fds, nfds, timeout_ms);
3855 }
3856 #endif
3857 
/* timespec counterpart of update_timeval(): refresh `timeout` to the time
 * left until the absolute deadline `limit`, clamping negatives to zero so
 * a retried ppoll() never sees a negative timeout. No-op when NULL. */
static inline void
update_timespec(struct timespec *timeout, double limit)
{
    double remaining;

    if (!timeout) return;

    remaining = limit - timeofday();
    timeout->tv_sec = (long)remaining;
    timeout->tv_nsec = (long)((remaining - (double)timeout->tv_sec) * 1e9);
    if (timeout->tv_sec < 0) timeout->tv_sec = 0;
    if (timeout->tv_nsec < 0) timeout->tv_nsec = 0;
}
3870 
3871 /*
3872  * returns a mask of events
3873  */
3874 int
3875 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3876 {
3877  struct pollfd fds;
3878  int result = 0, lerrno;
3879  double limit = 0;
3880  struct timespec ts;
3881  struct timespec *timeout = NULL;
3882  rb_thread_t *th = GET_THREAD();
3883 
3884 #define poll_update() \
3885  (update_timespec(timeout, limit), \
3886  TRUE)
3887 
3888  if (tv) {
3889  ts.tv_sec = tv->tv_sec;
3890  ts.tv_nsec = tv->tv_usec * 1000;
3891  limit = timeofday();
3892  limit += (double)tv->tv_sec + (double)tv->tv_usec * 1e-6;
3893  timeout = &ts;
3894  }
3895 
3896  fds.fd = fd;
3897  fds.events = (short)events;
3898 
3899  do {
3900  fds.revents = 0;
3901  lerrno = 0;
3902  BLOCKING_REGION({
3903  result = ppoll(&fds, 1, timeout, NULL);
3904  if (result < 0) lerrno = errno;
3905  }, ubf_select, th, FALSE);
3906 
3908  } while (result < 0 && retryable(errno = lerrno) && poll_update());
3909  if (result < 0) return -1;
3910 
3911  if (fds.revents & POLLNVAL) {
3912  errno = EBADF;
3913  return -1;
3914  }
3915 
3916  /*
3917  * POLLIN, POLLOUT have a different meanings from select(2)'s read/write bit.
3918  * Therefore we need to fix it up.
3919  */
3920  result = 0;
3921  if (fds.revents & POLLIN_SET)
3922  result |= RB_WAITFD_IN;
3923  if (fds.revents & POLLOUT_SET)
3924  result |= RB_WAITFD_OUT;
3925  if (fds.revents & POLLEX_SET)
3926  result |= RB_WAITFD_PRI;
3927 
3928  return result;
3929 }
3930 #else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
3931 static rb_fdset_t *
3932 init_set_fd(int fd, rb_fdset_t *fds)
3933 {
3934  if (fd < 0) {
3935  return 0;
3936  }
3937  rb_fd_init(fds);
3938  rb_fd_set(fd, fds);
3939 
3940  return fds;
3941 }
3942 
3943 struct select_args {
3944  union {
3945  int fd;
3946  int error;
3947  } as;
3951  struct timeval *tv;
3952 };
3953 
3954 static VALUE
3955 select_single(VALUE ptr)
3956 {
3957  struct select_args *args = (struct select_args *)ptr;
3958  int r;
3959 
3960  r = rb_thread_fd_select(args->as.fd + 1,
3961  args->read, args->write, args->except, args->tv);
3962  if (r == -1)
3963  args->as.error = errno;
3964  if (r > 0) {
3965  r = 0;
3966  if (args->read && rb_fd_isset(args->as.fd, args->read))
3967  r |= RB_WAITFD_IN;
3968  if (args->write && rb_fd_isset(args->as.fd, args->write))
3969  r |= RB_WAITFD_OUT;
3970  if (args->except && rb_fd_isset(args->as.fd, args->except))
3971  r |= RB_WAITFD_PRI;
3972  }
3973  return (VALUE)r;
3974 }
3975 
3976 static VALUE
3977 select_single_cleanup(VALUE ptr)
3978 {
3979  struct select_args *args = (struct select_args *)ptr;
3980 
3981  if (args->read) rb_fd_term(args->read);
3982  if (args->write) rb_fd_term(args->write);
3983  if (args->except) rb_fd_term(args->except);
3984 
3985  return (VALUE)-1;
3986 }
3987 
3988 int
3989 rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
3990 {
3991  rb_fdset_t rfds, wfds, efds;
3992  struct select_args args;
3993  int r;
3994  VALUE ptr = (VALUE)&args;
3995 
3996  args.as.fd = fd;
3997  args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
3998  args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
3999  args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4000  args.tv = tv;
4001 
4002  r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4003  if (r == -1)
4004  errno = args.as.error;
4005 
4006  return r;
4007 }
4008 #endif /* ! USE_POLL */
4009 
4010 /*
4011  * for GC
4012  */
4013 
4014 #ifdef USE_CONSERVATIVE_STACK_END
4015 void
4017 {
4018  VALUE stack_end;
4019  *stack_end_p = &stack_end;
4020 }
4021 #endif
4022 
4023 
4024 /*
4025  *
4026  */
4027 
4028 void
4030 {
4031  /* mth must be main_thread */
4032  if (rb_signal_buff_size() > 0) {
4033  /* wakeup main thread */
4035  }
4036 }
4037 
4038 static void
4039 timer_thread_function(void *arg)
4040 {
4041  rb_vm_t *vm = GET_VM(); /* TODO: fix me for Multi-VM */
4042 
4043  /*
4044  * Tricky: thread_destruct_lock doesn't close a race against
4045  * vm->running_thread switch. however it guarantees th->running_thread
4046  * point to valid pointer or NULL.
4047  */
4048  native_mutex_lock(&vm->thread_destruct_lock);
4049  /* for time slice */
4050  if (vm->running_thread)
4052  native_mutex_unlock(&vm->thread_destruct_lock);
4053 
4054  /* check signal */
4056 
4057 #if 0
4058  /* prove profiler */
4059  if (vm->prove_profile.enable) {
4060  rb_thread_t *th = vm->running_thread;
4061 
4062  if (vm->during_gc) {
4063  /* GC prove profiling */
4064  }
4065  }
4066 #endif
4067 }
4068 
4069 void
4071 {
4072  if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4073  native_reset_timer_thread();
4074  }
4075 }
4076 
4077 void
4079 {
4080  native_reset_timer_thread();
4081 }
4082 
4083 void
4085 {
4086  system_working = 1;
4087  rb_thread_create_timer_thread();
4088 }
4089 
4090 #if defined(HAVE_WORKING_FORK)
4091 static int
4092 clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4093 {
4094  int i;
4095  VALUE coverage = (VALUE)val;
4096  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4097  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4098  VALUE methods = RARRAY_AREF(coverage, COVERAGE_INDEX_METHODS);
4099 
4100  if (lines) {
4101  for (i = 0; i < RARRAY_LEN(lines); i++) {
4102  if (RARRAY_AREF(lines, i) != Qnil) {
4103  RARRAY_ASET(lines, i, INT2FIX(0));
4104  }
4105  }
4106  }
4107  if (branches) {
4108  VALUE counters = RARRAY_AREF(branches, 1);
4109  for (i = 0; i < RARRAY_LEN(counters); i++) {
4110  RARRAY_ASET(counters, i, INT2FIX(0));
4111  }
4112  }
4113  if (methods) {
4114  for (i = 2; i < RARRAY_LEN(methods); i += 3) {
4115  RARRAY_ASET(methods, i, INT2FIX(0));
4116  }
4117  }
4118 
4119  return ST_CONTINUE;
4120 }
4121 
4122 static void
4123 clear_coverage(void)
4124 {
4125  VALUE coverages = rb_get_coverages();
4126  if (RTEST(coverages)) {
4127  st_foreach(rb_hash_tbl_raw(coverages), clear_coverage_i, 0);
4128  }
4129 }
4130 
/* Common post-fork fixup: `th` is the only thread that survives fork(),
 * so promote it to main thread, rebuild the GVL, run `atfork` over every
 * thread recorded pre-fork, and reinitialize the living-thread list with
 * just `th`. The steps are order-sensitive: the list must still contain
 * the old threads while `atfork` visits them. */
static void
rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
{
    rb_thread_t *i = 0;
    rb_vm_t *vm = th->vm;
    vm->main_thread = th;

    gvl_atfork(th->vm);

    /* visit threads inherited from the parent (all dead in the child) */
    list_for_each(&vm->living_threads, i, vmlt_node) {
        atfork(i, th);
    }
    rb_vm_living_threads_init(vm);
    rb_vm_living_threads_insert(vm, th);
    vm->sleeper = 0;
    clear_coverage();
}
4148 
4149 static void
4150 terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4151 {
4152  if (th != current_th) {
4153  rb_mutex_abandon_keeping_mutexes(th);
4154  rb_mutex_abandon_locking_mutex(th);
4155  thread_cleanup_func(th, TRUE);
4156  }
4157 }
4158 
4159 void
4160 rb_thread_atfork(void)
4161 {
4162  rb_thread_t *th = GET_THREAD();
4163  rb_thread_atfork_internal(th, terminate_atfork_i);
4164  th->join_list = NULL;
4165 
4166  /* We don't want reproduce CVE-2003-0900. */
4168 }
4169 
4170 static void
4171 terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4172 {
4173  if (th != current_th) {
4174  thread_cleanup_func_before_exec(th);
4175  }
4176 }
4177 
4178 void
4180 {
4181  rb_thread_t *th = GET_THREAD();
4182  rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4183 }
4184 #else
4185 void
4187 {
4188 }
4189 
4190 void
4192 {
4193 }
4194 #endif
4195 
4196 struct thgroup {
4199 };
4200 
/* dsize callback for thgroup_data_type: a ThreadGroup's native footprint
 * is just its struct (the pointer itself is unused). */
static size_t
thgroup_memsize(const void *ptr)
{
    return sizeof(struct thgroup);
}
4206 
4207 static const rb_data_type_t thgroup_data_type = {
4208  "thgroup",
4209  {NULL, RUBY_TYPED_DEFAULT_FREE, thgroup_memsize,},
4211 };
4212 
4213 /*
4214  * Document-class: ThreadGroup
4215  *
4216  * ThreadGroup provides a means of keeping track of a number of threads as a
4217  * group.
4218  *
4219  * A given Thread object can only belong to one ThreadGroup at a time; adding
4220  * a thread to a new group will remove it from any previous group.
4221  *
4222  * Newly created threads belong to the same group as the thread from which they
4223  * were created.
4224  */
4225 
4226 /*
4227  * Document-const: Default
4228  *
4229  * The default ThreadGroup created when Ruby starts; all Threads belong to it
4230  * by default.
4231  */
4232 static VALUE
4233 thgroup_s_alloc(VALUE klass)
4234 {
4235  VALUE group;
4236  struct thgroup *data;
4237 
4238  group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4239  data->enclosed = 0;
4240  data->group = group;
4241 
4242  return group;
4243 }
4244 
4245 /*
4246  * call-seq:
4247  * thgrp.list -> array
4248  *
4249  * Returns an array of all existing Thread objects that belong to this group.
4250  *
4251  * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4252  */
4253 
4254 static VALUE
4255 thgroup_list(VALUE group)
4256 {
4257  VALUE ary = rb_ary_new();
4258  rb_vm_t *vm = GET_THREAD()->vm;
4259  rb_thread_t *th = 0;
4260 
4261  list_for_each(&vm->living_threads, th, vmlt_node) {
4262  if (th->thgroup == group) {
4263  rb_ary_push(ary, th->self);
4264  }
4265  }
4266  return ary;
4267 }
4268 
4269 
4270 /*
4271  * call-seq:
4272  * thgrp.enclose -> thgrp
4273  *
4274  * Prevents threads from being added to or removed from the receiving
4275  * ThreadGroup.
4276  *
4277  * New threads can still be started in an enclosed ThreadGroup.
4278  *
4279  * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4280  * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4281  * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4282  * tg.add thr
4283  * #=> ThreadError: can't move from the enclosed thread group
4284  */
4285 
/* ThreadGroup#enclose: flag the group so threads can no longer be moved
 * in or out of it (checked by ThreadGroup#add). Returns the group. */
static VALUE
thgroup_enclose(VALUE group)
{
    struct thgroup *data;

    TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
    data->enclosed = 1;

    return group;
}
4296 
4297 
4298 /*
4299  * call-seq:
4300  * thgrp.enclosed? -> true or false
4301  *
4302  * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4303  */
4304 
4305 static VALUE
4306 thgroup_enclosed_p(VALUE group)
4307 {
4308  struct thgroup *data;
4309 
4310  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4311  if (data->enclosed)
4312  return Qtrue;
4313  return Qfalse;
4314 }
4315 
4316 
4317 /*
4318  * call-seq:
4319  * thgrp.add(thread) -> thgrp
4320  *
4321  * Adds the given +thread+ to this group, removing it from any other
4322  * group to which it may have previously been a member.
4323  *
4324  * puts "Initial group is #{ThreadGroup::Default.list}"
4325  * tg = ThreadGroup.new
4326  * t1 = Thread.new { sleep }
4327  * t2 = Thread.new { sleep }
4328  * puts "t1 is #{t1}"
4329  * puts "t2 is #{t2}"
4330  * tg.add(t1)
4331  * puts "Initial group now #{ThreadGroup::Default.list}"
4332  * puts "tg group now #{tg.list}"
4333  *
4334  * This will produce:
4335  *
4336  * Initial group is #<Thread:0x401bdf4c>
4337  * t1 is #<Thread:0x401b3c90>
4338  * t2 is #<Thread:0x401b3c18>
4339  * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4340  * tg group now #<Thread:0x401b3c90>
4341  */
4342 
4343 static VALUE
4344 thgroup_add(VALUE group, VALUE thread)
4345 {
4346  rb_thread_t *target_th = rb_thread_ptr(thread);
4347  struct thgroup *data;
4348 
4349  if (OBJ_FROZEN(group)) {
4350  rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4351  }
4352  TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4353  if (data->enclosed) {
4354  rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4355  }
4356 
4357  if (!target_th->thgroup) {
4358  return Qnil;
4359  }
4360 
4361  if (OBJ_FROZEN(target_th->thgroup)) {
4362  rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4363  }
4364  TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4365  if (data->enclosed) {
4367  "can't move from the enclosed thread group");
4368  }
4369 
4370  target_th->thgroup = group;
4371  return group;
4372 }
4373 
4374 /*
4375  * Document-class: ThreadShield
4376  */
/* GC mark callback for ThreadShield: the wrapped pointer is itself a VALUE
 * (the underlying mutex object), so mark it directly. */
static void
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);
}
4382 
/* Type descriptor for ThreadShield wrappers: only a mark function is
 * needed (the payload is a VALUE, freed by the GC, with no extra memory). */
static const rb_data_type_t thread_shield_data_type = {
    "thread_shield",
    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
4388 
/* Allocate a ThreadShield: a typed-data object whose payload is a newly
 * allocated mutex VALUE (stored directly in DATA_PTR). */
static VALUE
thread_shield_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
}
4394 
4395 #define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
4396 #define THREAD_SHIELD_WAITING_MASK (FL_USER0|FL_USER1|FL_USER2|FL_USER3|FL_USER4|FL_USER5|FL_USER6|FL_USER7|FL_USER8|FL_USER9|FL_USER10|FL_USER11|FL_USER12|FL_USER13|FL_USER14|FL_USER15|FL_USER16|FL_USER17|FL_USER18|FL_USER19)
4397 #define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
4398 #define rb_thread_shield_waiting(b) (int)((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT)
4399 
4400 static inline void
4401 rb_thread_shield_waiting_inc(VALUE b)
4402 {
4403  unsigned int w = rb_thread_shield_waiting(b);
4404  w++;
4406  rb_raise(rb_eRuntimeError, "waiting count overflow");
4407  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4408  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4409 }
4410 
4411 static inline void
4412 rb_thread_shield_waiting_dec(VALUE b)
4413 {
4414  unsigned int w = rb_thread_shield_waiting(b);
4415  if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
4416  w--;
4417  RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
4418  RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
4419 }
4420 
4421 VALUE
4423 {
4424  VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
4425  rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
4426  return thread_shield;
4427 }
4428 
4429 /*
4430  * Wait a thread shield.
4431  *
4432  * Returns
4433  * true: acquired the thread shield
4434  * false: the thread shield was destroyed and no other threads waiting
4435  * nil: the thread shield was destroyed but still in use
4436  */
4437 VALUE
4439 {
4440  VALUE mutex = GetThreadShieldPtr(self);
4441  rb_mutex_t *m;
4442 
4443  if (!mutex) return Qfalse;
4444  GetMutexPtr(mutex, m);
4445  if (m->th == GET_THREAD()) return Qnil;
4446  rb_thread_shield_waiting_inc(self);
4447  rb_mutex_lock(mutex);
4448  rb_thread_shield_waiting_dec(self);
4449  if (DATA_PTR(self)) return Qtrue;
4450  rb_mutex_unlock(mutex);
4451  return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
4452 }
4453 
4454 static VALUE
4455 thread_shield_get_mutex(VALUE self)
4456 {
4457  VALUE mutex = GetThreadShieldPtr(self);
4458  if (!mutex)
4459  rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
4460  return mutex;
4461 }
4462 
4463 /*
4464  * Release a thread shield, and return true if it has waiting threads.
4465  */
4466 VALUE
4468 {
4469  VALUE mutex = thread_shield_get_mutex(self);
4470  rb_mutex_unlock(mutex);
4471  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4472 }
4473 
4474 /*
4475  * Release and destroy a thread shield, and return true if it has waiting threads.
4476  */
4477 VALUE
4479 {
4480  VALUE mutex = thread_shield_get_mutex(self);
4481  DATA_PTR(self) = 0;
4482  rb_mutex_unlock(mutex);
4483  return rb_thread_shield_waiting(self) > 0 ? Qtrue : Qfalse;
4484 }
4485 
/* Accessor for the per-thread hash that backs recursion detection
 * (Thread#inspect etc.). May be nil until first use. */
static VALUE
threadptr_recursive_hash(rb_thread_t *th)
{
    return th->ec.local_storage_recursive_hash;
}
4491 
/* Install `hash` as the thread's recursion-detection storage. */
static void
threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
{
    th->ec.local_storage_recursive_hash = hash;
}
4497 
4498 ID rb_frame_last_func(void);
4499 
4500 /*
4501  * Returns the current "recursive list" used to detect recursion.
4502  * This list is a hash table, unique for the current thread and for
4503  * the current __callee__.
4504  */
4505 
4506 static VALUE
4507 recursive_list_access(VALUE sym)
4508 {
4509  rb_thread_t *th = GET_THREAD();
4510  VALUE hash = threadptr_recursive_hash(th);
4511  VALUE list;
4512  if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
4513  hash = rb_ident_hash_new();
4514  threadptr_recursive_hash_set(th, hash);
4515  list = Qnil;
4516  }
4517  else {
4518  list = rb_hash_aref(hash, sym);
4519  }
4520  if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
4521  list = rb_hash_new();
4522  rb_hash_aset(hash, sym, list);
4523  }
4524  return list;
4525 }
4526 
4527 /*
4528  * Returns Qtrue iff obj_id (or the pair <obj, paired_obj>) is already
4529  * in the recursion list.
4530  * Assumes the recursion list is valid.
4531  */
4532 
/* Qtrue iff obj_id (or the pair <obj_id, paired_obj_id>) is already in the
 * recursion list. list[obj_id] is either Qtrue (single), a single paired
 * id, or a hash of paired ids — see recursive_push(). */
static VALUE
recursive_check(VALUE list, VALUE obj_id, VALUE paired_obj_id)
{
    /* object ids fit in a long on most platforms, but may be Bignums when
     * long is narrower than a pointer — compare accordingly */
#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_TYPE_P((obj_id), T_BIGNUM) ? \
    rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj_id, Qundef);
    if (pair_list == Qundef)
        return Qfalse;
    if (paired_obj_id) {
        if (!RB_TYPE_P(pair_list, T_HASH)) {
            /* single stored pair id */
            if (!OBJ_ID_EQL(paired_obj_id, pair_list))
                return Qfalse;
        }
        else {
            /* multiple pair ids stored as hash keys */
            if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
                return Qfalse;
        }
    }
    return Qtrue;
}
4558 
4559 /*
4560  * Pushes obj_id (or the pair <obj_id, paired_obj_id>) in the recursion list.
4561  * For a single obj_id, it sets list[obj_id] to Qtrue.
4562  * For a pair, it sets list[obj_id] to paired_obj_id if possible,
4563  * otherwise list[obj_id] becomes a hash like:
4564  * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
4565  * Assumes the recursion list is valid.
4566  */
4567 
4568 static void
4569 recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
4570 {
4571  VALUE pair_list;
4572 
4573  if (!paired_obj) {
4574  rb_hash_aset(list, obj, Qtrue);
4575  }
4576  else if ((pair_list = rb_hash_lookup2(list, obj, Qundef)) == Qundef) {
4577  rb_hash_aset(list, obj, paired_obj);
4578  }
4579  else {
4580  if (!RB_TYPE_P(pair_list, T_HASH)){
4581  VALUE other_paired_obj = pair_list;
4582  pair_list = rb_hash_new();
4583  rb_hash_aset(pair_list, other_paired_obj, Qtrue);
4584  rb_hash_aset(list, obj, pair_list);
4585  }
4586  rb_hash_aset(pair_list, paired_obj, Qtrue);
4587  }
4588 }
4589 
4590 /*
4591  * Pops obj_id (or the pair <obj_id, paired_obj_id>) from the recursion list.
4592  * For a pair, if list[obj_id] is a hash, then paired_obj_id is
4593  * removed from the hash and no attempt is made to simplify
4594  * list[obj_id] from {only_one_paired_id => true} to only_one_paired_id
4595  * Assumes the recursion list is valid.
4596  */
4597 
4598 static int
4599 recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
4600 {
4601  if (paired_obj) {
4602  VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
4603  if (pair_list == Qundef) {
4604  return 0;
4605  }
4606  if (RB_TYPE_P(pair_list, T_HASH)) {
4607  rb_hash_delete_entry(pair_list, paired_obj);
4608  if (!RHASH_EMPTY_P(pair_list)) {
4609  return 1; /* keep hash until is empty */
4610  }
4611  }
4612  }
4613  rb_hash_delete_entry(list, obj);
4614  return 1;
4615 }
4616 
4618  VALUE (*func) (VALUE, VALUE, int);
4619  VALUE list;
4620  VALUE obj;
4621  VALUE objid;
4622  VALUE pairid;
4623  VALUE arg;
4624 };
4625 
/* rb_catch_protect body for the outermost exec_recursive() call: invoke
 * the user function with recursive=FALSE. Inner recursive calls unwind to
 * here by throwing the recursion list. */
static VALUE
exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
{
    struct exec_recursive_params *p = (void *)data;
    return (*p->func)(p->obj, p->arg, FALSE);
}
4632 
4633 /*
4634  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4635  * current method is called recursively on obj, or on the pair <obj, pairid>
4636  * If outer is 0, then the innermost func will be called with recursive set
4637  * to Qtrue, otherwise the outermost func will be called. In the latter case,
4638  * all inner func are short-circuited by throw.
4639  * Implementation details: the value thrown is the recursive list which is
4640  * proper to the current method and unlikely to be caught anywhere else.
4641  * list[recursive_key] is used as a flag for the outermost call.
4642  */
4643 
/* Core of rb_exec_recursive*: call func(obj, arg, recursive) with recursion
 * detection keyed on the calling method name and obj (and pairid, if any).
 * With outer != 0 the OUTERMOST invocation wins: inner recursive hits throw
 * the recursion list, caught by rb_catch_protect below. Note the goto into
 * the other branch's `invalid:` label — both push/pop sequences share one
 * corruption error path, so this block must not be restructured casually. */
static VALUE
exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer)
{
    VALUE result = Qundef;
    const ID mid = rb_frame_last_func();
    const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
    struct exec_recursive_params p;
    int outermost;
    p.list = recursive_list_access(sym);
    p.objid = rb_obj_id(obj);
    p.obj = obj;
    p.pairid = pairid;
    p.arg = arg;
    /* recursive_key present in the list marks an outermost frame in flight */
    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.objid, pairid)) {
        /* recursion detected */
        if (outer && !outermost) {
            /* unwind to the outermost frame; it calls func(…, TRUE) */
            rb_throw_obj(p.list, p.list);
        }
        return (*func)(obj, arg, TRUE);
    }
    else {
        enum ruby_tag_type state;

        p.func = func;

        if (outermost) {
            recursive_push(p.list, ID2SYM(recursive_key), 0);
            recursive_push(p.list, p.objid, p.pairid);
            result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
            if (!recursive_pop(p.list, p.objid, p.pairid)) goto invalid;
            if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
            if (state != TAG_NONE) JUMP_TAG(state);
            if (result == p.list) {
                /* an inner frame threw: recursion was detected */
                result = (*func)(obj, arg, TRUE);
            }
        }
        else {
            volatile VALUE ret = Qundef;
            recursive_push(p.list, p.objid, p.pairid);
            PUSH_TAG();
            if ((state = EXEC_TAG()) == TAG_NONE) {
                ret = (*func)(obj, arg, FALSE);
            }
            POP_TAG();
            if (!recursive_pop(p.list, p.objid, p.pairid)) {
              invalid:
                rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
                         "for %+"PRIsVALUE" in %+"PRIsVALUE,
                         sym, rb_thread_current());
            }
            if (state != TAG_NONE) JUMP_TAG(state);
            result = ret;
        }
    }
    /* keep p alive across the longjmp-capable region */
    *(volatile struct exec_recursive_params *)&p;
    return result;
}
4702 
4703 /*
4704  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4705  * current method is called recursively on obj
4706  */
4707 
/* Public wrapper: recursion detection on obj alone, innermost call wins. */
VALUE
rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
{
    return exec_recursive(func, obj, 0, arg, 0);
}
4713 
4714 /*
4715  * Calls func(obj, arg, recursive), where recursive is non-zero if the
4716  * current method is called recursively on the ordered pair <obj, paired_obj>
4717  */
4718 
/* Public wrapper: recursion detection on the ordered pair <obj, paired_obj>,
 * innermost call wins. */
VALUE
rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 0);
}
4724 
4725 /*
4726  * If recursion is detected on the current method and obj, the outermost
4727  * func will be called with (obj, arg, Qtrue). All inner func will be
4728  * short-circuited using throw.
4729  */
4730 
4731 VALUE
4733 {
4734  return exec_recursive(func, obj, 0, arg, 1);
4735 }
4736 
4737 /*
4738  * If recursion is detected on the current method, obj and paired_obj,
4739  * the outermost func will be called with (obj, arg, Qtrue). All inner
4740  * func will be short-circuited using throw.
4741  */
4742 
/* Public wrapper: recursion detection on <obj, paired_obj>; on recursion
 * the OUTERMOST func is re-run with recursive=Qtrue and inner frames are
 * short-circuited via throw. */
VALUE
rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
{
    return exec_recursive(func, obj, rb_obj_id(paired_obj), arg, 1);
}
4748 
4749 /*
4750  * call-seq:
4751  * thread.backtrace -> array
4752  *
4753  * Returns the current backtrace of the target thread.
4754  *
4755  */
4756 
/* Thread#backtrace: delegate to the VM backtrace builder for `thval`. */
static VALUE
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace(argc, argv, thval);
}
4762 
4763 /* call-seq:
4764  * thread.backtrace_locations(*args) -> array or nil
4765  *
4766  * Returns the execution stack for the target thread---an array containing
4767  * backtrace location objects.
4768  *
4769  * See Thread::Backtrace::Location for more information.
4770  *
4771  * This method behaves similarly to Kernel#caller_locations except it applies
4772  * to a specific thread.
4773  */
/* Thread#backtrace_locations: delegate to the VM to build an array of
 * Thread::Backtrace::Location objects for `thval`. */
static VALUE
rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace_locations(argc, argv, thval);
}
4779 
4780 /*
4781  * Document-class: ThreadError
4782  *
4783  * Raised when an invalid operation is attempted on a thread.
4784  *
4785  * For example, when no other thread has been started:
4786  *
4787  * Thread.stop
4788  *
4789  * This will raises the following exception:
4790  *
4791  * ThreadError: stopping only thread
4792  * note: use sleep to stop forever
4793  */
4794 
/*
 * Init_Thread -- bootstrap of the Ruby threading subsystem.  Registers all
 * Thread and ThreadGroup methods, wires the main thread into the default
 * thread group, acquires the GVL for the main thread and starts the timer
 * thread.
 *
 * NOTE(review): this extraction dropped several original lines (numbering
 * gaps at 4796, 4814, 4818, 4835, 4837-4839, 4847, 4859, 4868, 4884,
 * 4896-4898); hedged notes mark each gap below.  Confirm against upstream
 * thread.c before editing.
 */
4795 void
/* NOTE(review): original line 4796 -- the function-name line,
 * "Init_Thread(void)" per this file's cross-reference index -- is missing. */
4797 {
/* From here on, rb_intern() always interns a compile-time constant string. */
4798 #undef rb_intern
4799 #define rb_intern(str) rb_intern_const(str)
4800 
4801  VALUE cThGroup;
4802  rb_thread_t *th = GET_THREAD();
4803 
/* Symbols used by Thread.handle_interrupt timing and the thread-locals API. */
4804  sym_never = ID2SYM(rb_intern("never"));
4805  sym_immediate = ID2SYM(rb_intern("immediate"));
4806  sym_on_blocking = ID2SYM(rb_intern("on_blocking"));
4807  id_locals = rb_intern("locals");
4808 
/* Thread singleton (class-level) methods. */
4809  rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
4810  rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
4811  rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
4812  rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
4813  rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
/* NOTE(review): original line 4814 (one singleton-method registration,
 * presumably "stop") is missing from this extraction. */
4815  rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
4816  rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
4817  rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
/* NOTE(review): original line 4818 (one registration, presumably "list")
 * is missing from this extraction. */
4819  rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
4820  rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
4821  rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
4822  rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
/* Thread.DEBUG accessors are only compiled in for debug builds. */
4823 #if THREAD_DEBUG < 0
4824  rb_define_singleton_method(rb_cThread, "DEBUG", rb_thread_s_debug, 0);
4825  rb_define_singleton_method(rb_cThread, "DEBUG=", rb_thread_s_debug_set, 1);
4826 #endif
4827  rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
4828  rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
4829  rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
4830 
/* Thread instance methods. */
4831  rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
4832  rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
4833  rb_define_method(rb_cThread, "join", thread_join_m, -1);
4834  rb_define_method(rb_cThread, "value", thread_value, 0);
/* NOTE(review): original line 4835 (one registration, presumably "kill")
 * is missing from this extraction. */
4836  rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
/* NOTE(review): original lines 4837-4839 (registrations such as "exit",
 * "run", "wakeup") are missing from this extraction. */
4840  rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
4841  rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
4842  rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
4843  rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
4844  rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
4845  rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
4846  rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
/* NOTE(review): original line 4847 (one registration, presumably "status")
 * is missing from this extraction. */
4848  rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
4849  rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
4850  rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
4851  rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
4852  rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
4853  rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
4854  rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
4855  rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
4856  rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
4857  rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
4858  rb_define_method(rb_cThread, "safe_level", rb_thread_safe_level, 0);
/* NOTE(review): original line 4859 (one registration, presumably "group")
 * is missing from this extraction. */
4860  rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
4861  rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
4862 
4863  rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
4864  rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
4865  rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
4866  rb_define_alias(rb_cThread, "inspect", "to_s");
4867 
/* NOTE(review): original line 4868 -- the first half of this call
 * (presumably rb_vm_register_special_exception(...)) -- is missing; only
 * its final argument survives below. */
4869  "stream closed in another thread");
4870 
/* ThreadGroup class and its methods. */
4871  cThGroup = rb_define_class("ThreadGroup", rb_cObject);
4872  rb_define_alloc_func(cThGroup, thgroup_s_alloc);
4873  rb_define_method(cThGroup, "list", thgroup_list, 0);
4874  rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
4875  rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
4876  rb_define_method(cThGroup, "add", thgroup_add, 1);
4877 
/* The main thread belongs to ThreadGroup::Default, created here. */
4878  {
4879  th->thgroup = th->vm->thgroup_default = rb_obj_alloc(cThGroup);
4880  rb_define_const(cThGroup, "Default", th->thgroup);
4881  }
4882 
4883  recursive_key = rb_intern("__recursive_key__");
/* NOTE(review): original line 4884 is missing from this extraction. */
4885 
4886  /* init thread core */
4887  {
4888  /* main thread setting */
4889  {
4890  /* acquire global vm lock */
4891  gvl_init(th->vm);
4892  gvl_acquire(th->vm, th);
4893  native_mutex_initialize(&th->vm->thread_destruct_lock);
4894  native_mutex_initialize(&th->interrupt_lock);
4895 
/* NOTE(review): original lines 4896-4898 are missing from this extraction. */
4899 
4900  th->interrupt_mask = 0;
4901  }
4902  }
4903 
/* Start the timer thread that drives scheduling/interrupt checks. */
4904  rb_thread_create_timer_thread();
4905 
4906  /* suppress warnings on cygwin, mingw and mswin.*/
4907  (void)native_mutex_trylock;
4908 
4909  Init_thread_sync();
4910 }
4911 
4912 int
4914 {
4915  rb_thread_t *th = ruby_thread_from_native();
4916 
4917  return th != 0;
4918 }
4919 
/*
 * Append a human-readable dump of every living thread to `msg`, used to
 * build the fatal "Deadlock?" report.  For each thread it prints the thread
 * object, its rb_thread_t/native ids and interrupt flag, the mutex it is
 * blocked on (owner + waiter count) if any, and the threads joining on it.
 *
 * NOTE(review): `sep` is constructed but never used in the visible code,
 * and original line 4946 (between the two trailing rb_str_catf calls) is
 * missing from this extraction -- presumably the line that appended the
 * thread's backtrace joined with `sep`.  Confirm against upstream thread.c.
 */
4920 static void
4921 debug_deadlock_check(rb_vm_t *vm, VALUE msg)
4922 {
4923  rb_thread_t *th = 0;
4924  VALUE sep = rb_str_new_cstr("\n ");
4925 
/* Summary line: counts plus current/main thread pointers. */
4926  rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
4927  vm_living_thread_num(vm), vm->sleeper, GET_THREAD(), vm->main_thread);
4928  list_for_each(&vm->living_threads, th, vmlt_node) {
4929  rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
4930  "native:%"PRI_THREAD_ID" int:%u",
4931  th->self, th, thread_id_str(th), th->interrupt_flag);
/* If the thread is blocked on a mutex, show the owner and waiter count. */
4932  if (th->locking_mutex) {
4933  rb_mutex_t *mutex;
4934  GetMutexPtr(th->locking_mutex, mutex);
4935  rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
4936  mutex->th, rb_mutex_num_waiting(mutex));
4937  }
/* List every thread waiting in Thread#join on this one. */
4938  {
4939  rb_thread_list_t *list = th->join_list;
4940  while (list) {
4941  rb_str_catf(msg, "\n depended by: tb_thread_id:%p", list->th);
4942  list = list->next;
4943  }
4944  }
4945  rb_str_catf(msg, "\n ");
4947  rb_str_catf(msg, "\n");
4948  }
4949 }
4950 
/*
 * Detect global deadlock: when every living thread is asleep
 * (vm->sleeper == living thread count) and none of them can make progress,
 * raise a fatal "No live threads left. Deadlock?" in the main thread.
 * A thread counts as potentially runnable ("found") if it can still run or
 * if the mutex it waits on is in a state that will release it.
 *
 * NOTE(review): original line 4962 -- the `if (...)` that guards the first
 * `found = 1;` and owns the `else if` below -- is missing from this
 * extraction, so the loop body as shown is syntactically incomplete.
 * Confirm the condition against upstream thread.c before editing.
 */
4951 static void
4952 rb_check_deadlock(rb_vm_t *vm)
4953 {
4954  int found = 0;
4955  rb_thread_t *th = 0;
4956 
/* Fast exits: not all threads are sleeping, or a patrol thread exists. */
4957  if (vm_living_thread_num(vm) > vm->sleeper) return;
4958  if (vm_living_thread_num(vm) < vm->sleeper) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
4959  if (patrol_thread && patrol_thread != GET_THREAD()) return;
4960 
4961  list_for_each(&vm->living_threads, th, vmlt_node) {
4963  found = 1;
4964  }
4965  else if (th->locking_mutex) {
4966  rb_mutex_t *mutex;
4967  GetMutexPtr(th->locking_mutex, mutex);
4968 
/* Runnable if the thread holds its own mutex (self-deadlock will raise)
 * or the mutex is free with waiters queued (someone will be woken). */
4969  if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
4970  found = 1;
4971  }
4972  }
4973  if (found)
4974  break;
4975  }
4976 
/* Nobody can make progress: report and raise fatal in the main thread. */
4977  if (!found) {
4978  VALUE argv[2];
4979  argv[0] = rb_eFatal;
4980  argv[1] = rb_str_new2("No live threads left. Deadlock?");
4981  debug_deadlock_check(vm, argv[1]);
4982  vm->sleeper--;
4983  rb_threadptr_raise(vm->main_thread, 2, argv);
4984  }
4985 }
4986 
4987 static void
4988 update_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
4989 {
4990  VALUE coverage = rb_iseq_coverage(GET_THREAD()->ec.cfp->iseq);
4991  if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
4992  long arg = FIX2INT(trace_arg->data);
4993  switch (arg % 16) {
4994  case COVERAGE_INDEX_LINES: {
4995  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4996  if (lines) {
4997  long line = rb_sourceline() - 1;
4998  long count;
4999  VALUE num;
5000  if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5001  return;
5002  }
5003  num = RARRAY_AREF(lines, line);
5004  if (!FIXNUM_P(num)) return;
5005  count = FIX2LONG(num) + 1;
5006  if (POSFIXABLE(count)) {
5007  RARRAY_ASET(lines, line, LONG2FIX(count));
5008  }
5009  }
5010  break;
5011  }
5012  case COVERAGE_INDEX_BRANCHES: {
5013  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5014  if (branches) {
5015  long count;
5016  long idx = arg / 16;
5017  VALUE counters = RARRAY_AREF(branches, 1);
5018  VALUE num = RARRAY_AREF(counters, idx);
5019  count = FIX2LONG(num) + 1;
5020  if (POSFIXABLE(count)) {
5021  RARRAY_ASET(counters, idx, LONG2FIX(count));
5022  }
5023  }
5024  break;
5025  }
5026  case COVERAGE_INDEX_METHODS: {
5027  VALUE methods = RARRAY_AREF(coverage, COVERAGE_INDEX_METHODS);
5028  if (methods) {
5029  long count;
5030  long idx = arg / 16 * 3 + 2;
5031  VALUE num = RARRAY_AREF(methods, idx);
5032  count = FIX2LONG(num) + 1;
5033  if (POSFIXABLE(count)) {
5034  RARRAY_ASET(methods, idx, LONG2FIX(count));
5035  }
5036  }
5037  break;
5038  }
5039  }
5040  }
5041 }
5042 
5043 VALUE
5045 {
5046  return GET_VM()->coverages;
5047 }
5048 
/*
 * Install the VM-wide coverage table and record which coverage kinds
 * (lines/branches/methods) are being measured.
 *
 * NOTE(review): original line 5054 is missing from this extraction --
 * presumably the rb_add_event_hook2() registration of update_coverage that
 * actually starts measurement.  Confirm against upstream thread.c.
 */
5049 void
5050 rb_set_coverages(VALUE coverages, int mode)
5051 {
5052  GET_VM()->coverages = coverages;
5053  GET_VM()->coverage_mode = mode;
5055 }
5056 
5057 /* Make coverage arrays empty so old covered files are no longer tracked. */
5058 static int
5059 reset_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
5060 {
5061  VALUE coverage = (VALUE)val;
5062  VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5063  VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5064  VALUE methods = RARRAY_AREF(coverage, COVERAGE_INDEX_METHODS);
5065  if (lines) rb_ary_clear(lines);
5066  if (branches) rb_ary_clear(branches);
5067  if (methods) rb_ary_clear(methods);
5068  return ST_CONTINUE;
5069 }
5070 
5071 void
5073 {
5074  VALUE coverages = rb_get_coverages();
5075  st_foreach(rb_hash_tbl_raw(coverages), reset_coverage_i, 0);
5076  GET_VM()->coverages = Qfalse;
5077  rb_remove_event_hook((rb_event_hook_func_t) update_coverage);
5078 }
5079 
5080 VALUE
5082 {
5083  VALUE coverage = rb_ary_tmp_new_fill(3);
5084  VALUE lines = Qfalse, branches = Qfalse, methods = Qfalse;
5085  int mode = GET_VM()->coverage_mode;
5086 
5087  if (mode & COVERAGE_TARGET_LINES) {
5088  lines = n > 0 ? rb_ary_tmp_new_fill(n) : rb_ary_tmp_new(0);
5089  }
5090  RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5091 
5092  if (mode & COVERAGE_TARGET_BRANCHES) {
5093  branches = rb_ary_tmp_new_fill(2);
5094  /* internal data structures for branch coverage:
5095  *
5096  * [[base_type, base_lineno,
5097  * target_type_1, target_lineno_1, target_counter_index_1,
5098  * target_type_2, target_lineno_2, target_counter_index_2, ...],
5099  * ...]
5100  *
5101  * Example: [[:case, 1,
5102  * :when, 2, 0,
5103  * :when, 3, 1, ...],
5104  * ...]
5105  */
5106  RARRAY_ASET(branches, 0, rb_ary_tmp_new(0));
5107  /* branch execution counters */
5108  RARRAY_ASET(branches, 1, rb_ary_tmp_new(0));
5109  }
5110  RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5111 
5112  if (mode & COVERAGE_TARGET_METHODS) {
5113  methods = rb_ary_tmp_new(0);
5114  /* internal data structures for method coverage:
5115  *
5116  * [symbol_of_method_name, lineno_of_method_head, counter,
5117  * ...]
5118  *
5119  * Example: [:foobar, 1, 0, ...]
5120  */
5121  }
5122  RARRAY_ASET(coverage, COVERAGE_INDEX_METHODS, methods);
5123 
5124  return coverage;
5125 }
5126 
5127 VALUE
5129 {
5130  VALUE interrupt_mask = rb_ident_hash_new();
5131  rb_thread_t *cur_th = GET_THREAD();
5132 
5133  rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5134  OBJ_FREEZE_RAW(interrupt_mask);
5135  rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5136 
5137  return rb_ensure(b_proc, data, rb_ary_pop, cur_th->pending_interrupt_mask_stack);
5138 }
#define GetMutexPtr(obj, tobj)
Definition: thread_sync.c:84
#define RBASIC_CLEAR_CLASS(obj)
Definition: internal.h:1469
VALUE rb_mutex_lock(VALUE mutex)
Definition: thread_sync.c:232
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
Definition: thread.c:462
rb_thread_list_t * join_list
Definition: vm_core.h:839
#define T_OBJECT
Definition: ruby.h:491
#define eKillSignal
Definition: thread.c:101
#define RUBY_EVENT_THREAD_END
Definition: ruby.h:2094
ID rb_check_id(volatile VALUE *)
Returns ID for the given name if it is interned already, or 0.
Definition: symbol.c:915
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:1627
rb_vm_t * vm
Definition: vm_core.h:788
const VALUE * root_lep
Definition: vm_core.h:760
void rb_warn(const char *fmt,...)
Definition: error.c:246
int ruby_thread_has_gvl_p(void)
Definition: thread.c:1543
VALUE rb_ary_pop(VALUE ary)
Definition: array.c:968
void rb_bug(const char *fmt,...)
Definition: error.c:521
struct rb_mutex_struct * next_mutex
Definition: thread_sync.c:49
#define RARRAY_LEN(a)
Definition: ruby.h:1019
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4596
void rb_postponed_job_flush(rb_vm_t *vm)
Definition: vm_trace.c:1592
#define fd_init_copy(f)
#define FALSE
Definition: nkf.h:174
ruby_tag_type
Definition: vm_core.h:151
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1138
VALUE rb_obj_id(VALUE obj)
Definition: gc.c:3158
#define INT2NUM(x)
Definition: ruby.h:1538
struct rb_thread_struct * running_thread
Definition: vm_core.h:521
struct timeval * tv
Definition: thread.c:3951
Definition: st.h:79
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Definition: thread.c:374
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:1606
int pending_interrupt_queue_checked
Definition: vm_core.h:830
rb_fdset_t * read
Definition: thread.c:3948
VALUE rb_exec_recursive_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4732
#define NUM2INT(x)
Definition: ruby.h:684
int count
Definition: encoding.c:56
rb_control_frame_t * cfp
Definition: vm_core.h:744
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1716
int rb_thread_check_trap_pending(void)
Definition: thread.c:1230
#define TAG_NONE
Definition: vm_core.h:164
void rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
Definition: thread.c:2029
VALUE rb_thread_list(void)
Definition: thread.c:2474
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:907
int rb_block_given_p(void)
Determines if the current method is given a block.
Definition: eval.c:835
VALUE rb_exec_recursive(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE arg)
Definition: thread.c:4709
#define CLASS_OF(v)
Definition: ruby.h:453
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2284
struct rb_thread_struct * th
Definition: vm_core.h:722
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
Definition: eval.c:1159
void rb_unblock_function_t(void *)
Definition: intern.h:872
VALUE rb_ary_delete_at(VALUE ary, long pos)
Definition: array.c:3059
#define st_foreach
Definition: regint.h:186
Definition: id.h:108
rb_unblock_function_t * func
Definition: vm_core.h:714
#define Qtrue
Definition: ruby.h:437
int rb_remove_event_hook(rb_event_hook_func_t func)
Definition: vm_trace.c:194
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1162
const char ruby_digitmap[]
Definition: bignum.c:38
#define CLOCK_MONOTONIC
Definition: win32.h:134
VALUE pending_interrupt_mask_stack
Definition: vm_core.h:829
VALUE rb_ary_shift(VALUE ary)
Definition: array.c:1019
Definition: st.h:99
#define TypedData_Get_Struct(obj, type, data_type, sval)
Definition: ruby.h:1183
const int id
Definition: nkf.c:209
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Definition: error.c:836
#define COVERAGE_TARGET_LINES
Definition: internal.h:1697
void rb_threadptr_signal_raise(rb_thread_t *th, int sig)
Definition: thread.c:2149
struct rb_thread_struct volatile * th
Definition: thread_sync.c:48
SOCKET rb_w32_get_osfhandle(int)
Definition: win32.c:1064
#define rb_vm_register_special_exception(sp, e, m)
Definition: vm_core.h:1543
VALUE rb_thread_stop(void)
Definition: thread.c:2441
#define TH_JUMP_TAG(th, st)
Definition: eval_intern.h:204
#define rb_check_arity
Definition: intern.h:298
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE passed_block_handler)
Definition: vm.c:1172
#define MAYBE_UNUSED(x)
Definition: internal.h:46
VALUE rb_ary_push(VALUE ary, VALUE item)
Definition: array.c:924
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1135
void rb_thread_wait_for(struct timeval time)
Definition: thread.c:1205
void rb_add_event_hook2(rb_event_hook_func_t func, rb_event_flag_t events, VALUE data, rb_event_hook_flag_t hook_flag)
Definition: vm_trace.c:148
if(len<=MAX_WORD_LENGTH &&len >=MIN_WORD_LENGTH)
Definition: zonetab.h:883
VALUE rb_str_concat(VALUE, VALUE)
Definition: string.c:2999
#define EXIT_SUCCESS
Definition: error.c:37
void rb_signal_exec(rb_thread_t *th, int sig)
Definition: signal.c:1036
struct st_table * rb_hash_tbl_raw(VALUE hash)
Definition: hash.c:482
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:544
unsigned int report_on_exception
Definition: vm_core.h:858
#define rb_fd_zero(f)
Definition: intern.h:347
void rb_threadptr_setup_exception(rb_thread_t *th, VALUE mesg, VALUE cause)
Definition: eval.c:586
VALUE rb_thread_current(void)
Definition: thread.c:2494
#define PRIxVALUE
Definition: ruby.h:133
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1595
#define OBJ_ID_EQL(obj_id, other)
VALUE rb_thread_alloc(VALUE klass)
Definition: vm.c:2582
VALUE rb_ivar_get(VALUE, ID)
Definition: variable.c:1210
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Definition: thread.c:356
void * rb_thread_call_without_gvl(void *(*func)(void *data), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1429
VALUE rb_ary_clear(VALUE ary)
Definition: array.c:3501
int rb_thread_alone(void)
Definition: thread.c:3273
#define TH_EXEC_TAG()
Definition: eval_intern.h:198
VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr)
Definition: vm_eval.c:2006
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
#define T_HASH
Definition: ruby.h:499
VALUE rb_obj_alloc(VALUE)
Allocates an instance of klass.
Definition: object.c:2121
VALUE rb_thread_local_aref(VALUE thread, ID id)
Definition: thread.c:3013
#define DATA_PTR(dta)
Definition: ruby.h:1106
#define RUBY_VM_SET_TRAP_INTERRUPT(th)
Definition: vm_core.h:1608
void rb_gc_mark(VALUE ptr)
Definition: gc.c:4464
VALUE rb_hash_lookup(VALUE hash, VALUE key)
Definition: hash.c:853
#define T_ARRAY
Definition: ruby.h:498
rb_thread_t * th
Definition: thread.c:107
#define st_delete
Definition: regint.h:182
#define st_lookup
Definition: regint.h:185
#define PUSH_TAG()
Definition: eval_intern.h:147
void rb_set_coverages(VALUE coverages, int mode)
Definition: thread.c:5050
time_t tv_sec
Definition: missing.h:54
#define COVERAGE_INDEX_BRANCHES
Definition: internal.h:1695
VALUE rb_thread_kill(VALUE thread)
Definition: thread.c:2276
VALUE rb_threadptr_backtrace_str_ary(rb_thread_t *th, long lev, long n)
Definition: vm_backtrace.c:653
VALUE rb_ensure(VALUE(*b_proc)(ANYARGS), VALUE data1, VALUE(*e_proc)(ANYARGS), VALUE data2)
An equivalent to ensure clause.
Definition: eval.c:1035
void rb_gc_force_recycle(VALUE obj)
Definition: gc.c:6175
void rb_obj_call_init(VALUE obj, int argc, const VALUE *argv)
Calls #initialize method of obj with the given arguments.
Definition: eval.c:1583
#define FIXNUM_P(f)
Definition: ruby.h:365
rb_fdset_t * write
Definition: thread.c:3949
VALUE rb_exec_recursive_paired(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4720
#define thread_id_str(th)
Definition: thread.c:279
void rb_thread_start_timer_thread(void)
Definition: thread.c:4084
#define THROW_DATA_P(err)
Definition: internal.h:903
#define RB_WAITFD_OUT
Definition: io.h:49
#define GET_THREAD()
Definition: vm_core.h:1583
VALUE thgroup_default
Definition: vm_core.h:526
#define rb_fd_set(n, f)
Definition: intern.h:348
VALUE rb_eArgError
Definition: error.c:802
time_t tv_sec
Definition: missing.h:61
#define sym(x)
Definition: date_core.c:3721
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:22
int ruby_native_thread_p(void)
Definition: thread.c:4913
#define rb_fd_isset(n, f)
Definition: intern.h:350
#define PRI_THREAD_ID
Definition: thread.c:280
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Definition: ruby.h:1851
void rb_hash_foreach(VALUE hash, int(*func)(ANYARGS), VALUE farg)
Definition: hash.c:385
VALUE rb_thread_wakeup(VALUE thread)
Definition: thread.c:2370
struct timeval rb_time_timeval(VALUE time)
Definition: time.c:2305
#define RHASH(obj)
Definition: internal.h:663
VALUE rb_obj_class(VALUE)
call-seq: obj.class -> class
Definition: object.c:277
#define RB_TYPE_P(obj, type)
Definition: ruby.h:527
void rb_reset_random_seed(void)
Definition: random.c:1570
int rb_thread_fd_writable(int fd)
Definition: thread.c:3782
VALUE rb_obj_is_kind_of(VALUE, VALUE)
call-seq: obj.is_a?(class) -> true or false obj.kind_of?(class) -> true or false
Definition: object.c:842
#define POSFIXABLE(f)
Definition: ruby.h:366
#define RUBY_VM_CHECK_INTS_BLOCKING(th)
Definition: thread.c:176
#define RUBY_VM_INTERRUPTED_ANY(th)
Definition: vm_core.h:1610
#define TH_POP_TAG()
Definition: eval_intern.h:138
#define MEMZERO(p, type, n)
Definition: ruby.h:1660
#define PRI_TIMET_PREFIX
Definition: ruby.h:143
rb_thread_t * target
Definition: thread.c:841
size_t living_thread_num
Definition: vm_core.h:525
VALUE rb_eSignal
Definition: error.c:797
#define RUBY_THREAD_PRIORITY_MAX
Definition: thread.c:77
fd_set rb_fdset_t
Definition: intern.h:346
#define rb_fd_term(f)
Definition: intern.h:357
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Definition: hash.c:1616
#define EXEC_TAG()
Definition: eval_intern.h:201
VALUE rb_convert_type_with_id(VALUE, int, const char *, ID)
Definition: object.c:2979
VALUE rb_vm_thread_backtrace_locations(int argc, const VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:907
VALUE locking_mutex
Definition: vm_core.h:836
#define val
long tv_usec
Definition: missing.h:55
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1893
int fd
Definition: thread.c:108
#define RB_WAITFD_PRI
Definition: io.h:48
#define PRIdVALUE
Definition: ruby.h:130
#define rb_fd_ptr(f)
Definition: intern.h:354
VALUE rb_hash_delete_entry(VALUE hash, VALUE key)
Definition: hash.c:1098
VALUE rb_ary_new(void)
Definition: array.c:499
void * blocking_region_buffer
Definition: vm_core.h:817
void Init_Thread(void)
Definition: thread.c:4796
VALUE rb_default_coverage(int n)
Definition: thread.c:5081
#define RCLASS_ORIGIN(c)
Definition: internal.h:794
#define JUMP_TAG(st)
Definition: eval_intern.h:206
#define NIL_P(v)
Definition: ruby.h:451
long tv_nsec
Definition: missing.h:62
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:646
int enclosed
Definition: thread.c:4197
#define rb_intern(str)
void rb_define_const(VALUE, const char *, VALUE)
Definition: variable.c:2691
void rb_thread_atfork_before_exec(void)
Definition: thread.c:4191
#define thread_debug
Definition: thread.c:273
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4029
int argc
Definition: ruby.c:187
void rb_thread_stop_timer_thread(void)
Definition: thread.c:4070
rb_thread_status
Definition: vm_core.h:683
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Definition: ruby.h:2117
#define Qfalse
Definition: ruby.h:436
const VALUE special_exceptions[ruby_special_error_count]
Definition: vm_core.h:536
void ruby_thread_init_stack(rb_thread_t *th)
Definition: thread.c:559
VALUE rb_proc_location(VALUE self)
Definition: proc.c:1142
#define threadptr_initialized(th)
Definition: thread.c:751
RUBY_EXTERN VALUE rb_cModule
Definition: ruby.h:1916
void rb_thread_check_ints(void)
Definition: thread.c:1219
#define RUBY_UBF_PROCESS
Definition: intern.h:878
void rb_exit(int status)
Definition: process.c:3783
void rb_thread_fd_close(int fd)
Definition: thread.c:2219
VALUE rb_thread_shield_new(void)
Definition: thread.c:4422
volatile int sleeper
Definition: vm_core.h:532
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
Definition: eval.c:256
#define rb_str_new2
Definition: intern.h:835
#define OBJ_FREEZE_RAW(x)
Definition: ruby.h:1305
int err
Definition: win32.c:135
void rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo)
Definition: eval_error.c:167
#define EXIT_FAILURE
Definition: eval_intern.h:33
void rb_error_frozen(const char *what)
Definition: error.c:2584
VALUE rb_thread_shield_release(VALUE self)
Definition: thread.c:4467
void rb_thread_atfork(void)
Definition: thread.c:4186
#define POP_TAG()
Definition: eval_intern.h:148
struct list_node wfd_node
Definition: thread.c:106
VALUE rb_thread_create(VALUE(*fn)(ANYARGS), void *arg)
Definition: thread.c:831
void rb_throw_obj(VALUE tag, VALUE value)
Definition: vm_eval.c:1868
#define FD_SET(fd, set)
Definition: win32.h:593
#define ATOMIC_CAS(var, oldval, newval)
Definition: ruby_atomic.h:132
#define ALLOC(type)
Definition: ruby.h:1588
VALUE read
Definition: io.c:8809
void rb_sys_fail(const char *mesg)
Definition: error.c:2403
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition: class.c:1758
VALUE rb_vm_thread_backtrace(int argc, const VALUE *argv, VALUE thval)
Definition: vm_backtrace.c:901
VALUE rb_yield(VALUE)
Definition: vm_eval.c:973
#define RARRAY_CONST_PTR(a)
Definition: ruby.h:1021
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Definition: thread.c:1422
#define rb_thread_set_current(th)
Definition: vm_core.h:1586
int errno
#define TRUE
Definition: nkf.h:175
VALUE rb_uninterruptible(VALUE(*b_proc)(ANYARGS), VALUE data)
Definition: thread.c:5128
struct rb_mutex_struct * keeping_mutexes
Definition: vm_core.h:837
VALUE rb_thread_shield_wait(VALUE self)
Definition: thread.c:4438
VALUE rb_sprintf(const char *format,...)
Definition: sprintf.c:1452
int rb_get_next_signal(void)
Definition: signal.c:741
int rb_thread_fd_select(int max, rb_fdset_t *read, rb_fdset_t *write, rb_fdset_t *except, struct timeval *timeout)
Definition: thread.c:3789
VALUE rb_to_symbol(VALUE name)
Definition: string.c:10506
#define rb_fd_copy(d, s, n)
Definition: intern.h:351
#define VM_ASSERT(expr)
Definition: vm_core.h:53
#define rb_enc_name(enc)
Definition: encoding.h:171
VALUE rb_class_path(VALUE)
Definition: variable.c:295
VALUE rb_hash_new(void)
Definition: hash.c:424
#define do_select_update()
#define DELAY_INFTY
Definition: thread.c:838
int rb_threadptr_reset_raised(rb_thread_t *th)
Definition: thread.c:2183
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Definition: class.c:1908
VALUE rb_class_inherited_p(VALUE mod, VALUE arg)
call-seq: mod <= other -> true, false, or nil
Definition: object.c:1827
VALUE rb_iseq_coverage(const rb_iseq_t *iseq)
Definition: iseq.c:743
struct list_head waiting_fds
Definition: vm_core.h:523
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4309
#define RUBY_VM_SET_TIMER_INTERRUPT(th)
Definition: vm_core.h:1605
#define PRIsVALUE
Definition: ruby.h:135
unsigned long ID
Definition: ruby.h:86
handle_interrupt_timing
Definition: thread.c:1609
#define TAG_FATAL
Definition: vm_core.h:172
VALUE rb_make_exception(int argc, const VALUE *argv)
Make an Exception object from the list of arguments in a manner similar to Kernel#raise.
Definition: eval.c:788
#define Qnil
Definition: ruby.h:438
void rb_thread_sleep_forever(void)
Definition: thread.c:1160
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:615
VALUE rb_eStandardError
Definition: error.c:799
VALUE group
Definition: thread.c:4198
#define BUILTIN_TYPE(x)
Definition: ruby.h:518
unsigned long VALUE
Definition: ruby.h:85
#define THREAD_SHIELD_WAITING_MASK
Definition: thread.c:4396
#define SAVE_ROOT_JMPBUF(th, stmt)
Definition: eval_intern.h:121
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:565
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1686
RUBY_EXTERN VALUE rb_cThread
Definition: ruby.h:1930
int rb_wait_for_single_fd(int fd, int events, struct timeval *tv)
Definition: thread.c:3989
#define RBASIC(obj)
Definition: ruby.h:1197
union select_args::@118 as
struct rb_thread_struct * main_thread
Definition: vm_core.h:520
int error
Definition: thread.c:3946
VALUE rb_eTypeError
Definition: error.c:801
VALUE first_proc
Definition: vm_core.h:841
#define FIX2INT(x)
Definition: ruby.h:686
#define COVERAGE_INDEX_LINES
Definition: internal.h:1694
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
Definition: thread.c:1589
#define RUBY_EVENT_THREAD_BEGIN
Definition: ruby.h:2093
void rb_gc_set_stack_end(VALUE **stack_end_p)
Definition: thread.c:4016
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:131
int clock_gettime(clockid_t, struct timespec *)
Definition: win32.c:4608
void rb_thread_schedule(void)
Definition: thread.c:1266
#define rb_enc_asciicompat(enc)
Definition: encoding.h:239
VALUE rb_str_new_cstr(const char *)
Definition: string.c:771
VALUE rb_exec_recursive_paired_outer(VALUE(*func)(VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
Definition: thread.c:4744
rb_atomic_t interrupt_flag
Definition: vm_core.h:832
void rb_thread_wait_fd(int fd)
Definition: thread.c:3776
VALUE rb_blocking_function_t(void *)
Definition: intern.h:873
VALUE rb_thread_main(void)
Definition: thread.c:2515
struct rb_execution_context_struct::@143 machine
unsigned int uint32_t
Definition: sha2.h:101
#define StringValueCStr(v)
Definition: ruby.h:571
VALUE(* first_func)(ANYARGS)
Definition: vm_core.h:843
enum rb_thread_status status
Definition: vm_core.h:812
void rb_thread_sleep(int sec)
Definition: thread.c:1243
#define rb_fd_max(f)
Definition: intern.h:358
#define thread_start_func_2(th, st, rst)
Definition: thread.c:284
void rb_thread_sleep_deadly(void)
Definition: thread.c:1167
enum rb_thread_status prev_status
Definition: thread.c:122
#define RARRAY_ASET(a, i, v)
Definition: ruby.h:1034
VALUE first_args
Definition: vm_core.h:842
struct list_head waitq
Definition: thread_sync.c:50
void rb_thread_recycle_stack_release(VALUE *)
Definition: vm.c:2355
void rb_thread_terminate_all(void)
Definition: thread.c:479
rb_encoding * rb_enc_get(VALUE obj)
Definition: encoding.c:860
#define THREAD_SHIELD_WAITING_SHIFT
Definition: thread.c:4397
int size
Definition: encoding.c:57
void rb_reset_coverages(void)
Definition: thread.c:5072
VALUE rb_ident_hash_new(void)
Definition: hash.c:2924
VALUE rb_hash_lookup2(VALUE hash, VALUE key, VALUE def)
Definition: hash.c:842
#define INT2FIX(i)
Definition: ruby.h:232
void rb_thread_execute_interrupts(VALUE thval)
Definition: thread.c:2107
VALUE(* func)(VALUE, VALUE, int)
Definition: thread.c:4618
const VALUE * rb_vm_ep_local_ep(const VALUE *ep)
Definition: vm.c:55
#define RCLASS_SUPER(c)
Definition: classext.h:16
int rb_sourceline(void)
Definition: vm.c:1283
#define RARRAY_AREF(a, i)
Definition: ruby.h:1033
#define COVERAGE_TARGET_METHODS
Definition: internal.h:1699
#define RUBY_INTERNAL_EVENT_SWITCH
Definition: ruby.h:2103
unsigned long interrupt_mask
Definition: vm_core.h:833
VALUE rb_block_proc(void)
Definition: proc.c:780
#define xmalloc
Definition: defines.h:183
#define RUBY_THREAD_PRIORITY_MIN
Definition: thread.c:78
#define st_init_numtable
Definition: regint.h:178
#define RBASIC_CLASS(obj)
Definition: ruby.h:878
#define ANYARGS
Definition: defines.h:173
VALUE rb_thread_group(VALUE thread)
Definition: thread.c:2758
struct rb_unblock_callback unblock
Definition: vm_core.h:835
VALUE rb_eRuntimeError
Definition: error.c:800
#define BLOCKING_REGION(exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:165
VALUE rb_hash_aref(VALUE hash, VALUE key)
Definition: hash.c:831
VALUE rb_eFatal
Definition: error.c:798
#define rb_fd_select(n, rfds, wfds, efds, timeout)
Definition: intern.h:359
#define RB_GC_SAVE_MACHINE_CONTEXT(th)
Definition: thread.c:138
VALUE rb_eSystemExit
Definition: error.c:795
VALUE rb_str_catf(VALUE str, const char *format,...)
Definition: sprintf.c:1492
void rb_thread_reset_timer_thread(void)
Definition: thread.c:4078
rb_nativethread_id_t thread_id
Definition: vm_core.h:808
rb_nativethread_lock_t thread_destruct_lock
Definition: vm_core.h:518
int rb_signal_buff_size(void)
Definition: signal.c:711
#define rb_fd_clr(n, f)
Definition: intern.h:349
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
#define LONG2FIX(i)
Definition: ruby.h:234
#define RTEST(v)
Definition: ruby.h:450
#define FD_CLR(f, s)
Definition: win32.h:611
uint32_t running_time_us
Definition: vm_core.h:862
rb_thread_t * waiting
Definition: thread.c:841
#define PRIuSIZE
Definition: ruby.h:177
VALUE rb_mutex_unlock(VALUE mutex)
Definition: thread_sync.c:370
struct rb_encoding_entry * list
Definition: encoding.c:55
VALUE rb_thread_shield_destroy(VALUE self)
Definition: thread.c:4478
#define OBJ_INFECT(x, s)
Definition: ruby.h:1302
VALUE rb_str_cat_cstr(VALUE, const char *)
Definition: string.c:2756
#define OBJ_FROZEN(x)
Definition: ruby.h:1304
unsigned int thread_report_on_exception
Definition: vm_core.h:530
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1175
double delay
Definition: thread.c:842
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Definition: thread.c:368
VALUE rb_ary_dup(VALUE ary)
Definition: array.c:1930
VALUE rb_ary_join(VALUE ary, VALUE sep)
Definition: array.c:2037
VALUE rb_ary_tmp_new_fill(long capa)
Definition: array.c:550
#define st_insert
Definition: regint.h:184
int rb_atomic_t
Definition: ruby_atomic.h:120
double rb_num2dbl(VALUE)
Converts a Numeric object to double.
Definition: object.c:3524
#define rb_fd_resize(n, f)
Definition: intern.h:353
#define rb_thread_shield_waiting(b)
Definition: thread.c:4398
rb_execution_context_t ec
Definition: vm_core.h:790
char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) *2+3]
Definition: vm_core.h:736
#define ruby_debug
Definition: ruby.h:1814
#define RUBY_EVENT_COVERAGE
Definition: ruby.h:2100
const char * name
Definition: nkf.c:208
#define xrealloc
Definition: defines.h:186
RUBY_EXTERN VALUE rb_eIOError
Definition: ruby.h:1947
#define ID2SYM(x)
Definition: ruby.h:383
VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
Definition: thread.c:1436
void rb_threadptr_trap_interrupt(rb_thread_t *th)
Definition: thread.c:439
#define rb_fd_init_copy(d, s)
Definition: intern.h:356
VALUE rb_str_new_frozen(VALUE)
Definition: string.c:1158
struct rb_thread_list_struct * next
Definition: vm_core.h:721
#define RUBY_VM_INTERRUPTED(th)
Definition: vm_core.h:1609
#define rb_fd_init(f)
Definition: intern.h:355
VALUE rb_thread_local_aset(VALUE thread, ID id, VALUE val)
Definition: thread.c:3148
struct list_head living_threads
Definition: vm_core.h:524
#define rb_fd_dup(d, s)
Definition: intern.h:352
void rb_vm_gvl_destroy(rb_vm_t *vm)
Definition: thread.c:348
int rb_threadptr_pending_interrupt_active_p(rb_thread_t *th)
Definition: thread.c:1716
#define fd_term(f)
rb_fdset_t * except
Definition: thread.c:3950
#define FD_ISSET(f, s)
Definition: win32.h:614
#define RUBY_TYPED_DEFAULT_FREE
Definition: ruby.h:1134
NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start, VALUE *register_stack_start))
#define GetThreadShieldPtr(obj)
Definition: thread.c:4395
#define vsnprintf
Definition: subst.h:7
rb_nativethread_lock_t interrupt_lock
Definition: vm_core.h:834
void void xfree(void *)
#define RB_WAITFD_IN
Definition: io.h:47
VALUE ruby_vm_special_exception_copy(VALUE)
Definition: vm_insnhelper.c:26
VALUE pending_interrupt_queue
Definition: vm_core.h:828
#define RHASH_EMPTY_P(h)
Definition: ruby.h:1060
VALUE write
Definition: io.c:8809
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
Definition: thread.c:1501
#define eTerminateSignal
Definition: thread.c:102
VALUE rb_get_coverages(void)
Definition: thread.c:5044
#define mod(x, y)
Definition: date_strftime.c:28
VALUE except
Definition: io.c:8809
#define NULL
Definition: _sdbm.c:102
#define COVERAGE_TARGET_BRANCHES
Definition: internal.h:1698
#define FIX2LONG(x)
Definition: ruby.h:363
#define Qundef
Definition: ruby.h:439
#define T_ICLASS
Definition: ruby.h:493
int rb_notify_fd_close(int fd)
Definition: thread.c:2193
VALUE rb_eKeyError
Definition: error.c:804
void rb_threadptr_interrupt(rb_thread_t *th)
Definition: thread.c:433
VALUE rb_thread_wakeup_alive(VALUE thread)
Definition: thread.c:2379
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1515
ID rb_to_id(VALUE)
Definition: string.c:10496
int select(int num_fds, fd_set *in_fds, fd_set *out_fds, fd_set *ex_fds, struct timeval *timeout)
VALUE rb_eThreadError
Definition: eval.c:857
VALUE rb_thread_run(VALUE thread)
Definition: thread.c:2417
void rb_threadptr_signal_exit(rb_thread_t *th)
Definition: thread.c:2159
#define TYPEOF_TIMEVAL_TV_SEC
Definition: timev.h:22
char ** argv
Definition: ruby.c:188
int rb_thread_to_be_killed(VALUE thread)
Definition: thread.c:2302
int rb_thread_interrupted(VALUE thval)
Definition: thread.c:1237
int rb_threadptr_set_raised(rb_thread_t *th)
Definition: thread.c:2173
RUBY_EXTERN void rb_write_error_str(VALUE mesg)
Definition: io.c:7580
#define RUBY_UBF_IO
Definition: intern.h:877
#define LIKELY(x)
Definition: internal.h:42
#define COVERAGE_INDEX_METHODS
Definition: internal.h:1696
#define GET_VM()
Definition: vm_core.h:1582
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Definition: thread.c:362