Ruby 2.5.0dev (2017-10-22 revision 60238)
thread_pthread.c
1 /* -*-c-*- */
2 /**********************************************************************
3 
4  thread_pthread.c -
5 
6  $Author$
7 
8  Copyright (C) 2004-2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13 
14 #include "gc.h"
15 
16 #ifdef HAVE_SYS_RESOURCE_H
17 #include <sys/resource.h>
18 #endif
19 #ifdef HAVE_THR_STKSEGMENT
20 #include <thread.h>
21 #endif
22 #if HAVE_FCNTL_H
23 #include <fcntl.h>
24 #elif HAVE_SYS_FCNTL_H
25 #include <sys/fcntl.h>
26 #endif
27 #ifdef HAVE_SYS_PRCTL_H
28 #include <sys/prctl.h>
29 #endif
30 #if defined(__native_client__) && defined(NACL_NEWLIB)
31 # include "nacl/select.h"
32 #endif
33 #if defined(HAVE_SYS_TIME_H)
34 #include <sys/time.h>
35 #endif
36 #if defined(__HAIKU__)
37 #include <kernel/OS.h>
38 #endif
39 
40 static void native_mutex_lock(rb_nativethread_lock_t *lock);
41 static void native_mutex_unlock(rb_nativethread_lock_t *lock);
42 static int native_mutex_trylock(rb_nativethread_lock_t *lock);
43 static void native_mutex_initialize(rb_nativethread_lock_t *lock);
44 static void native_mutex_destroy(rb_nativethread_lock_t *lock);
45 static void native_cond_signal(rb_nativethread_cond_t *cond);
46 static void native_cond_broadcast(rb_nativethread_cond_t *cond);
47 static void native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
48 static void native_cond_initialize(rb_nativethread_cond_t *cond, int flags);
49 static void native_cond_destroy(rb_nativethread_cond_t *cond);
50 static void rb_thread_wakeup_timer_thread_low(void);
51 static struct {
52  pthread_t id;
53  int created;
54 } timer_thread;
55 #define TIMER_THREAD_CREATED_P() (timer_thread.created != 0)
56 
57 #define RB_CONDATTR_CLOCK_MONOTONIC 1
58 
59 #if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
60  defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
61  defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
62 #define USE_MONOTONIC_COND 1
63 #else
64 #define USE_MONOTONIC_COND 0
65 #endif
66 
67 #if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
68 /* The timer thread sleeps while only one Ruby thread is running. */
69 # define USE_SLEEPY_TIMER_THREAD 1
70 #else
71 # define USE_SLEEPY_TIMER_THREAD 0
72 #endif
73 
74 static void
75 gvl_acquire_common(rb_vm_t *vm)
76 {
77  if (vm->gvl.acquired) {
78 
79  vm->gvl.waiting++;
80  if (vm->gvl.waiting == 1) {
81  /*
82  * Wake up the timer thread only if it is asleep.
83  * When the timer thread is in polling mode, we do not want to
84  * disturb its polling interval.
85  */
86  rb_thread_wakeup_timer_thread_low();
87  }
88 
89  while (vm->gvl.acquired) {
90  native_cond_wait(&vm->gvl.cond, &vm->gvl.lock);
91  }
92 
93  vm->gvl.waiting--;
94 
95  if (vm->gvl.need_yield) {
96  vm->gvl.need_yield = 0;
97  native_cond_signal(&vm->gvl.switch_cond);
98  }
99  }
100 
101  vm->gvl.acquired = 1;
102 }
103 
104 static void
105 gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
106 {
107  native_mutex_lock(&vm->gvl.lock);
108  gvl_acquire_common(vm);
109  native_mutex_unlock(&vm->gvl.lock);
110 }
111 
112 static void
113 gvl_release_common(rb_vm_t *vm)
114 {
115  vm->gvl.acquired = 0;
116  if (vm->gvl.waiting > 0)
117  native_cond_signal(&vm->gvl.cond);
118 }
119 
120 static void
121 gvl_release(rb_vm_t *vm)
122 {
123  native_mutex_lock(&vm->gvl.lock);
124  gvl_release_common(vm);
125  native_mutex_unlock(&vm->gvl.lock);
126 }
127 
128 static void
129 gvl_yield(rb_vm_t *vm, rb_thread_t *th)
130 {
131  native_mutex_lock(&vm->gvl.lock);
132 
133  gvl_release_common(vm);
134 
135  /* Another thread is already processing a GVL yield. */
136  if (UNLIKELY(vm->gvl.wait_yield)) {
137  while (vm->gvl.wait_yield)
138  native_cond_wait(&vm->gvl.switch_wait_cond, &vm->gvl.lock);
139  goto acquire;
140  }
141 
142  if (vm->gvl.waiting > 0) {
143  /* Wait until another thread takes the GVL. */
144  vm->gvl.need_yield = 1;
145  vm->gvl.wait_yield = 1;
146  while (vm->gvl.need_yield)
147  native_cond_wait(&vm->gvl.switch_cond, &vm->gvl.lock);
148  vm->gvl.wait_yield = 0;
149  }
150  else {
151  native_mutex_unlock(&vm->gvl.lock);
152  sched_yield();
153  native_mutex_lock(&vm->gvl.lock);
154  }
155 
156  native_cond_broadcast(&vm->gvl.switch_wait_cond);
157  acquire:
158  gvl_acquire_common(vm);
159  native_mutex_unlock(&vm->gvl.lock);
160 }
161 
162 static void
163 gvl_init(rb_vm_t *vm)
164 {
165  native_mutex_initialize(&vm->gvl.lock);
166  native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
167  native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);
168  native_cond_initialize(&vm->gvl.switch_wait_cond, RB_CONDATTR_CLOCK_MONOTONIC);
169  vm->gvl.acquired = 0;
170  vm->gvl.waiting = 0;
171  vm->gvl.need_yield = 0;
172  vm->gvl.wait_yield = 0;
173 }
174 
175 static void
176 gvl_destroy(rb_vm_t *vm)
177 {
178  native_cond_destroy(&vm->gvl.switch_wait_cond);
179  native_cond_destroy(&vm->gvl.switch_cond);
180  native_cond_destroy(&vm->gvl.cond);
181  native_mutex_destroy(&vm->gvl.lock);
182 }
183 
184 #if defined(HAVE_WORKING_FORK)
185 static void
186 gvl_atfork(rb_vm_t *vm)
187 {
188  gvl_init(vm);
189  gvl_acquire(vm, GET_THREAD());
190 }
191 #endif
192 
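The gvl_* functions above implement the Global VM Lock as a single mutex/condvar pair handed off cooperatively between Ruby threads: gvl_acquire() blocks until the lock is free (waking the sleepy timer thread when contention first appears), gvl_release() wakes one waiter, and gvl_yield() briefly hands the lock to a waiter before taking it back. Below is a minimal sketch of how a blocking region uses this API, roughly what GVL_UNLOCK_BEGIN()/GVL_UNLOCK_END() in thread.c expand to; do_blocking_call() is a hypothetical stand-in for any system call that should run without the GVL:

/* Sketch only: run one blocking operation outside the GVL.
 * Assumes the calling thread currently holds the GVL. */
static void
blocking_region_sketch(rb_vm_t *vm, rb_thread_t *th)
{
    gvl_release(vm);       /* other Ruby threads may run from here on */
    do_blocking_call();    /* hypothetical; must not touch VM state or call Ruby APIs */
    gvl_acquire(vm, th);   /* block until the GVL is ours again */
}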
193 #define NATIVE_MUTEX_LOCK_DEBUG 0
194 
195 static void
196 mutex_debug(const char *msg, void *lock)
197 {
198  if (NATIVE_MUTEX_LOCK_DEBUG) {
199  int r;
200  static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
201 
202  if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
203  fprintf(stdout, "%s: %p\n", msg, lock);
204  if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
205  }
206 }
207 
208 static void
209 native_mutex_lock(pthread_mutex_t *lock)
210 {
211  int r;
212  mutex_debug("lock", lock);
213  if ((r = pthread_mutex_lock(lock)) != 0) {
214  rb_bug_errno("pthread_mutex_lock", r);
215  }
216 }
217 
218 static void
219 native_mutex_unlock(pthread_mutex_t *lock)
220 {
221  int r;
222  mutex_debug("unlock", lock);
223  if ((r = pthread_mutex_unlock(lock)) != 0) {
224  rb_bug_errno("pthread_mutex_unlock", r);
225  }
226 }
227 
228 static inline int
229 native_mutex_trylock(pthread_mutex_t *lock)
230 {
231  int r;
232  mutex_debug("trylock", lock);
233  if ((r = pthread_mutex_trylock(lock)) != 0) {
234  if (r == EBUSY) {
235  return EBUSY;
236  }
237  else {
238  rb_bug_errno("pthread_mutex_trylock", r);
239  }
240  }
241  return 0;
242 }
243 
244 static void
245 native_mutex_initialize(pthread_mutex_t *lock)
246 {
247  int r = pthread_mutex_init(lock, 0);
248  mutex_debug("init", lock);
249  if (r != 0) {
250  rb_bug_errno("pthread_mutex_init", r);
251  }
252 }
253 
254 static void
255 native_mutex_destroy(pthread_mutex_t *lock)
256 {
257  int r = pthread_mutex_destroy(lock);
258  mutex_debug("destroy", lock);
259  if (r != 0) {
260  rb_bug_errno("pthread_mutex_destroy", r);
261  }
262 }
263 
264 static void
265 native_cond_initialize(rb_nativethread_cond_t *cond, int flags)
266 {
267 #ifdef HAVE_PTHREAD_COND_INIT
268  int r;
269 # if USE_MONOTONIC_COND
270  pthread_condattr_t attr;
271 
272  pthread_condattr_init(&attr);
273 
274  cond->clockid = CLOCK_REALTIME;
275  if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
276  r = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
277  if (r == 0) {
278  cond->clockid = CLOCK_MONOTONIC;
279  }
280  }
281 
282  r = pthread_cond_init(&cond->cond, &attr);
283  pthread_condattr_destroy(&attr);
284 # else
285  r = pthread_cond_init(&cond->cond, NULL);
286 # endif
287  if (r != 0) {
288  rb_bug_errno("pthread_cond_init", r);
289  }
290 
291  return;
292 #endif
293 }
294 
295 static void
296 native_cond_destroy(rb_nativethread_cond_t *cond)
297 {
298 #ifdef HAVE_PTHREAD_COND_INIT
299  int r = pthread_cond_destroy(&cond->cond);
300  if (r != 0) {
301  rb_bug_errno("pthread_cond_destroy", r);
302  }
303 #endif
304 }
305 
306 /*
307  * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
308  * EAGAIN after retrying 8192 times. This can be seen in the following source:
309  *
310  * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
311  *
312  * The following native_cond_signal and native_cond_broadcast functions
313  * need to retry until the pthread functions no longer return EAGAIN.
314  */
315 
316 static void
317 native_cond_signal(rb_nativethread_cond_t *cond)
318 {
319  int r;
320  do {
321  r = pthread_cond_signal(&cond->cond);
322  } while (r == EAGAIN);
323  if (r != 0) {
324  rb_bug_errno("pthread_cond_signal", r);
325  }
326 }
327 
328 static void
329 native_cond_broadcast(rb_nativethread_cond_t *cond)
330 {
331  int r;
332  do {
333  r = pthread_cond_broadcast(&cond->cond);
334  } while (r == EAGAIN);
335  if (r != 0) {
336  rb_bug_errno("native_cond_broadcast", r);
337  }
338 }
339 
340 static void
341 native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
342 {
343  int r = pthread_cond_wait(&cond->cond, mutex);
344  if (r != 0) {
345  rb_bug_errno("pthread_cond_wait", r);
346  }
347 }
348 
349 static int
350 native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *ts)
351 {
352  int r;
353 
354  /*
355  * Old Linux kernels may return EINTR, even though POSIX says
356  * "These functions shall not return an error code of [EINTR]".
357  * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
358  * Let's hide it from arch generic code.
359  */
360  do {
361  r = pthread_cond_timedwait(&cond->cond, mutex, ts);
362  } while (r == EINTR);
363 
364  if (r != 0 && r != ETIMEDOUT) {
365  rb_bug_errno("pthread_cond_timedwait", r);
366  }
367 
368  return r;
369 }
370 
371 static struct timespec
372 native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
373 {
374  int ret;
375  struct timeval tv;
376  struct timespec timeout;
377  struct timespec now;
378 
379 #if USE_MONOTONIC_COND
380  if (cond->clockid == CLOCK_MONOTONIC) {
381  ret = clock_gettime(cond->clockid, &now);
382  if (ret != 0)
383  rb_sys_fail("clock_gettime()");
384  goto out;
385  }
386 
387  if (cond->clockid != CLOCK_REALTIME)
388  rb_bug("unsupported clockid %"PRIdVALUE, (SIGNED_VALUE)cond->clockid);
389 #endif
390 
391  ret = gettimeofday(&tv, 0);
392  if (ret != 0)
393  rb_sys_fail(0);
394  now.tv_sec = tv.tv_sec;
395  now.tv_nsec = tv.tv_usec * 1000;
396 
397 #if USE_MONOTONIC_COND
398  out:
399 #endif
400  timeout.tv_sec = now.tv_sec;
401  timeout.tv_nsec = now.tv_nsec;
402  timeout.tv_sec += timeout_rel.tv_sec;
403  timeout.tv_nsec += timeout_rel.tv_nsec;
404 
405  if (timeout.tv_nsec >= 1000*1000*1000) {
406  timeout.tv_sec++;
407  timeout.tv_nsec -= 1000*1000*1000;
408  }
409 
410  if (timeout.tv_sec < now.tv_sec)
411  timeout.tv_sec = TIMET_MAX;
412 
413  return timeout;
414 }
415 
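native_cond_timeout() and native_cond_timedwait() together implement the standard POSIX pattern of converting a relative timeout into an absolute deadline on whichever clock the condition variable was initialized with, and retrying the wait when it is interrupted. Below is a self-contained sketch of the same pattern with plain pthreads, assuming the condition variable was created with pthread_condattr_setclock(CLOCK_MONOTONIC); deadline_after() and wait_until() are illustrative names, not functions from this file:

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Compute now + rel on CLOCK_MONOTONIC, normalizing tv_nsec into [0, 1e9). */
static struct timespec
deadline_after(struct timespec rel)
{
    struct timespec now, abs;
    clock_gettime(CLOCK_MONOTONIC, &now);
    abs.tv_sec  = now.tv_sec  + rel.tv_sec;
    abs.tv_nsec = now.tv_nsec + rel.tv_nsec;
    if (abs.tv_nsec >= 1000*1000*1000) {
        abs.tv_sec++;
        abs.tv_nsec -= 1000*1000*1000;
    }
    return abs;
}

/* Wait until the absolute deadline, hiding EINTR from the caller. */
static int
wait_until(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abs)
{
    int r;
    do {
        r = pthread_cond_timedwait(cond, mutex, abs);
    } while (r == EINTR);
    return r; /* 0 when signaled, ETIMEDOUT when the deadline passed */
}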
416 #define native_cleanup_push pthread_cleanup_push
417 #define native_cleanup_pop pthread_cleanup_pop
418 #ifdef HAVE_SCHED_YIELD
419 #define native_thread_yield() (void)sched_yield()
420 #else
421 #define native_thread_yield() ((void)0)
422 #endif
423 
424 #if defined(SIGVTALRM) && !defined(__CYGWIN__)
425 #define USE_UBF_LIST 1
426 static rb_nativethread_lock_t ubf_list_lock;
427 #endif
428 
429 static pthread_key_t ruby_native_thread_key;
430 
431 static void
432 null_func(int i)
433 {
434  /* null */
435 }
436 
437 static rb_thread_t *
438 ruby_thread_from_native(void)
439 {
440  return pthread_getspecific(ruby_native_thread_key);
441 }
442 
443 static int
444 ruby_thread_set_native(rb_thread_t *th)
445 {
446  return pthread_setspecific(ruby_native_thread_key, th) == 0;
447 }
448 
449 static void native_thread_init(rb_thread_t *th);
450 
451 void
452 Init_native_thread(void)
453 {
454  rb_thread_t *th = GET_THREAD();
455 
456  pthread_key_create(&ruby_native_thread_key, NULL);
457  th->thread_id = pthread_self();
458  fill_thread_id_str(th);
459  native_thread_init(th);
460 #ifdef USE_UBF_LIST
461  native_mutex_initialize(&ubf_list_lock);
462 #endif
463 #ifndef __native_client__
464  posix_signal(SIGVTALRM, null_func);
465 #endif
466 }
467 
468 static void
469 native_thread_init(rb_thread_t *th)
470 {
471  native_thread_data_t *nd = &th->native_thread_data;
472 
473 #ifdef USE_UBF_LIST
474  list_node_init(&nd->ubf_list);
475 #endif
476  native_cond_initialize(&nd->sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
477  ruby_thread_set_native(th);
478 }
479 
480 static void
481 native_thread_destroy(rb_thread_t *th)
482 {
483  native_cond_destroy(&th->native_thread_data.sleep_cond);
484 }
485 
486 #ifndef USE_THREAD_CACHE
487 #define USE_THREAD_CACHE 0
488 #endif
489 
490 #if USE_THREAD_CACHE
491 static rb_thread_t *register_cached_thread_and_wait(void);
492 #endif
493 
494 #if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
495 #define STACKADDR_AVAILABLE 1
496 #elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
497 #define STACKADDR_AVAILABLE 1
498 #undef MAINSTACKADDR_AVAILABLE
499 #define MAINSTACKADDR_AVAILABLE 1
500 void *pthread_get_stackaddr_np(pthread_t);
501 size_t pthread_get_stacksize_np(pthread_t);
502 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
503 #define STACKADDR_AVAILABLE 1
504 #elif defined HAVE_PTHREAD_GETTHRDS_NP
505 #define STACKADDR_AVAILABLE 1
506 #elif defined __HAIKU__
507 #define STACKADDR_AVAILABLE 1
508 #elif defined __ia64 && defined _HPUX_SOURCE
509 #include <sys/dyntune.h>
510 
511 #define STACKADDR_AVAILABLE 1
512 
513 /*
514  * Do not lower the thread's stack to PTHREAD_STACK_MIN,
515  * otherwise one would receive a 'sendsig: useracc failed.'
516  * and a coredump.
517  */
518 #undef PTHREAD_STACK_MIN
519 
520 #define HAVE_PTHREAD_ATTR_GET_NP 1
521 #undef HAVE_PTHREAD_ATTR_GETSTACK
522 
523 /*
524  * As the PTHREAD_STACK_MIN is undefined and
525  * no one touches the default stacksize,
526  * it is just fine to use the default.
527  */
528 #define pthread_attr_get_np(thid, attr) 0
529 
530 /*
531  * Using value of sp is very rough... To make it more real,
532  * addr would need to be aligned to vps_pagesize.
533  * The vps_pagesize is 'Default user page size (kBytes)'
534  * and could be retrieved by gettune().
535  */
536 static int
537 hpux_attr_getstackaddr(const pthread_attr_t *attr, void **addr)
538 {
539  static uint64_t pagesize;
540  size_t size;
541 
542  if (!pagesize) {
543  if (gettune("vps_pagesize", &pagesize)) {
544  pagesize = 16;
545  }
546  pagesize *= 1024;
547  }
548  pthread_attr_getstacksize(attr, &size);
549  *addr = (void *)((size_t)((char *)_Asm_get_sp() - size) & ~(pagesize - 1));
550  return 0;
551 }
552 #define pthread_attr_getstackaddr(attr, addr) hpux_attr_getstackaddr(attr, addr)
553 #endif
554 
555 #ifndef MAINSTACKADDR_AVAILABLE
556 # ifdef STACKADDR_AVAILABLE
557 # define MAINSTACKADDR_AVAILABLE 1
558 # else
559 # define MAINSTACKADDR_AVAILABLE 0
560 # endif
561 #endif
562 #if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
563 # define get_main_stack(addr, size) get_stack(addr, size)
564 #endif
565 
566 #ifdef STACKADDR_AVAILABLE
567 /*
568  * Get the initial address and size of current thread's stack
569  */
570 static int
571 get_stack(void **addr, size_t *size)
572 {
573 #define CHECK_ERR(expr) \
574  {int err = (expr); if (err) return err;}
575 #ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
576  pthread_attr_t attr;
577  size_t guard = 0;
578  STACK_GROW_DIR_DETECTION;
579  CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
580 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
581  CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
582  STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
583 # else
584  CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
585  CHECK_ERR(pthread_attr_getstacksize(&attr, size));
586 # endif
587  CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
588  *size -= guard;
589  pthread_attr_destroy(&attr);
590 #elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
591  pthread_attr_t attr;
592  CHECK_ERR(pthread_attr_init(&attr));
593  CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
594 # ifdef HAVE_PTHREAD_ATTR_GETSTACK
595  CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
596 # else
597  CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
598  CHECK_ERR(pthread_attr_getstacksize(&attr, size));
599 # endif
600  STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
601  pthread_attr_destroy(&attr);
602 #elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
603  pthread_t th = pthread_self();
604  *addr = pthread_get_stackaddr_np(th);
605  *size = pthread_get_stacksize_np(th);
606 #elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
607  stack_t stk;
608 # if defined HAVE_THR_STKSEGMENT /* Solaris */
609  CHECK_ERR(thr_stksegment(&stk));
610 # else /* OpenBSD */
611  CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
612 # endif
613  *addr = stk.ss_sp;
614  *size = stk.ss_size;
615 #elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
616  pthread_t th = pthread_self();
617  struct __pthrdsinfo thinfo;
618  char reg[256];
619  int regsiz=sizeof(reg);
620  CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
621  &thinfo, sizeof(thinfo),
622  &reg, &regsiz));
623  *addr = thinfo.__pi_stackaddr;
624  /* Must not use thinfo.__pi_stacksize for size.
625  It is around 3KB smaller than the correct size
626  calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
627  *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
628  STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
629 #elif defined __HAIKU__
630  thread_info info;
631  STACK_GROW_DIR_DETECTION;
632  CHECK_ERR(get_thread_info(find_thread(NULL), &info));
633  *addr = info.stack_base;
634  *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
635  STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
636 #else
637 #error STACKADDR_AVAILABLE is defined but not implemented.
638 #endif
639  return 0;
640 #undef CHECK_ERR
641 }
642 #endif
643 
644 static struct {
645  rb_nativethread_id_t id;
646  size_t stack_maxsize;
647  VALUE *stack_start;
648 #ifdef __ia64
649  VALUE *register_stack_start;
650 #endif
651 } native_main_thread;
652 
653 #ifdef STACK_END_ADDRESS
654 extern void *STACK_END_ADDRESS;
655 #endif
656 
657 enum {
658  RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
659  RUBY_STACK_SPACE_RATIO = 5
660 };
661 
662 static size_t
663 space_size(size_t stack_size)
664 {
665  size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
666  if (space_size > RUBY_STACK_SPACE_LIMIT) {
667  return RUBY_STACK_SPACE_LIMIT;
668  }
669  else {
670  return space_size;
671  }
672 }
673 
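For example, with a 1 MiB machine stack, space_size() reserves 1048576 / 5 = 209715 bytes (about 205 KiB) of headroom; an 8 MiB stack would give about 1.6 MiB and is therefore capped at the 1 MiB RUBY_STACK_SPACE_LIMIT. native_thread_create() below subtracts this amount from the usable stack_maxsize.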
674 #ifdef __linux__
675 static __attribute__((noinline)) void
676 reserve_stack(volatile char *limit, size_t size)
677 {
678 # ifdef C_ALLOCA
679 # error needs alloca()
680 # endif
681  struct rlimit rl;
682  volatile char buf[0x100];
683  enum {stack_check_margin = 0x1000}; /* for -fstack-check */
684 
686 
687  if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
688  return;
689 
690  if (size < stack_check_margin) return;
691  size -= stack_check_margin;
692 
693  size -= sizeof(buf); /* margin */
694  if (IS_STACK_DIR_UPPER()) {
695  const volatile char *end = buf + sizeof(buf);
696  limit += size;
697  if (limit > end) {
698  /* |<-bottom (=limit(a)) top->|
699  * | .. |<-buf 256B |<-end | stack check |
700  * | 256B | =size= | margin (4KB)|
701  * | =size= limit(b)->| 256B | |
702  * | | alloca(sz) | | |
703  * | .. |<-buf |<-limit(c) [sz-1]->0> | |
704  */
705  size_t sz = limit - end;
706  limit = alloca(sz);
707  limit[sz-1] = 0;
708  }
709  }
710  else {
711  limit -= size;
712  if (buf > limit) {
713  /* |<-top (=limit(a)) bottom->|
714  * | .. | 256B buf->| | stack check |
715  * | 256B | =size= | margin (4KB)|
716  * | =size= limit(b)->| 256B | |
717  * | | alloca(sz) | | |
718  * | .. | buf->| limit(c)-><0> | |
719  */
720  size_t sz = buf - limit;
721  limit = alloca(sz);
722  limit[0] = 0;
723  }
724  }
725 }
726 #else
727 # define reserve_stack(limit, size) ((void)(limit), (void)(size))
728 #endif
729 
730 #undef ruby_init_stack
731 /* Set stack bottom of Ruby implementation.
732  *
733  * You must call this function before any heap allocation by the Ruby implementation;
734  * otherwise the GC may break living objects. */
735 void
736 ruby_init_stack(volatile VALUE *addr
737 #ifdef __ia64
738  , void *bsp
739 #endif
740  )
741 {
742  native_main_thread.id = pthread_self();
743 #ifdef __ia64
744  if (!native_main_thread.register_stack_start ||
745  (VALUE*)bsp < native_main_thread.register_stack_start) {
746  native_main_thread.register_stack_start = (VALUE*)bsp;
747  }
748 #endif
749 #if MAINSTACKADDR_AVAILABLE
750  if (native_main_thread.stack_maxsize) return;
751  {
752  void* stackaddr;
753  size_t size;
754  if (get_main_stack(&stackaddr, &size) == 0) {
755  native_main_thread.stack_maxsize = size;
756  native_main_thread.stack_start = stackaddr;
757  reserve_stack(stackaddr, size);
758  goto bound_check;
759  }
760  }
761 #endif
762 #ifdef STACK_END_ADDRESS
763  native_main_thread.stack_start = STACK_END_ADDRESS;
764 #else
765  if (!native_main_thread.stack_start ||
766  STACK_UPPER((VALUE *)(void *)&addr,
767  native_main_thread.stack_start > addr,
768  native_main_thread.stack_start < addr)) {
769  native_main_thread.stack_start = (VALUE *)addr;
770  }
771 #endif
772  {
773 #if defined(HAVE_GETRLIMIT)
774 #if defined(PTHREAD_STACK_DEFAULT)
775 # if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
776 # error "PTHREAD_STACK_DEFAULT is too small"
777 # endif
778  size_t size = PTHREAD_STACK_DEFAULT;
779 #else
780  size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
781 #endif
782  size_t space;
783  int pagesize = getpagesize();
784  struct rlimit rlim;
785  STACK_GROW_DIR_DETECTION;
786  if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
787  size = (size_t)rlim.rlim_cur;
788  }
789  addr = native_main_thread.stack_start;
790  if (IS_STACK_DIR_UPPER()) {
791  space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
792  }
793  else {
794  space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
795  }
796  native_main_thread.stack_maxsize = space;
797 #endif
798  }
799 
800 #if MAINSTACKADDR_AVAILABLE
801  bound_check:
802 #endif
803  /* If addr is outside the estimated main-thread stack range, */
804  /* it is probably on a co-routine (alternative) stack. [Feature #2294] */
805  {
806  void *start, *end;
807  STACK_GROW_DIR_DETECTION;
808 
809  if (IS_STACK_DIR_UPPER()) {
810  start = native_main_thread.stack_start;
811  end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
812  }
813  else {
814  start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
815  end = native_main_thread.stack_start;
816  }
817 
818  if ((void *)addr < start || (void *)addr > end) {
819  /* out of range */
820  native_main_thread.stack_start = (VALUE *)addr;
821  native_main_thread.stack_maxsize = 0; /* unknown */
822  }
823  }
824 }
825 
826 #define CHECK_ERR(expr) \
827  {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
828 
829 static int
830 native_thread_init_stack(rb_thread_t *th)
831 {
832  rb_nativethread_id_t curr = pthread_self();
833 
834  if (pthread_equal(curr, native_main_thread.id)) {
835  th->ec.machine.stack_start = native_main_thread.stack_start;
836  th->ec.machine.stack_maxsize = native_main_thread.stack_maxsize;
837  }
838  else {
839 #ifdef STACKADDR_AVAILABLE
840  void *start;
841  size_t size;
842 
843  if (get_stack(&start, &size) == 0) {
844  th->ec.machine.stack_start = start;
845  th->ec.machine.stack_maxsize = size;
846  }
847 #elif defined get_stack_of
848  if (!th->ec.machine.stack_maxsize) {
849  native_mutex_lock(&th->interrupt_lock);
850  native_mutex_unlock(&th->interrupt_lock);
851  }
852 #else
853  rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
854 #endif
855  }
856 #ifdef __ia64
857  th->ec.machine.register_stack_start = native_main_thread.register_stack_start;
858  th->ec.machine.stack_maxsize /= 2;
859  th->ec.machine.register_stack_maxsize = th->ec.machine.stack_maxsize;
860 #endif
861  return 0;
862 }
863 
864 #ifndef __CYGWIN__
865 #define USE_NATIVE_THREAD_INIT 1
866 #endif
867 
868 static void *
869 thread_start_func_1(void *th_ptr)
870 {
871 #if USE_THREAD_CACHE
872  thread_start:
873 #endif
874  {
875  rb_thread_t *th = th_ptr;
876 #if !defined USE_NATIVE_THREAD_INIT
877  VALUE stack_start;
878 #endif
879 
880  fill_thread_id_str(th);
881 #if defined USE_NATIVE_THREAD_INIT
882  native_thread_init_stack(th);
883 #endif
884  native_thread_init(th);
885  /* run */
886 #if defined USE_NATIVE_THREAD_INIT
887  thread_start_func_2(th, th->ec.machine.stack_start, rb_ia64_bsp());
888 #else
889  thread_start_func_2(th, &stack_start, rb_ia64_bsp());
890 #endif
891  }
892 #if USE_THREAD_CACHE
893  if (1) {
894  /* cache thread */
895  rb_thread_t *th;
896  if ((th = register_cached_thread_and_wait()) != 0) {
897  th_ptr = (void *)th;
898  th->thread_id = pthread_self();
899  goto thread_start;
900  }
901  }
902 #endif
903  return 0;
904 }
905 
906 struct cached_thread_entry {
907  volatile rb_thread_t **th_area;
908  rb_nativethread_cond_t *cond;
909  struct cached_thread_entry *next;
910 };
911 
912 
913 #if USE_THREAD_CACHE
914 static rb_nativethread_lock_t thread_cache_lock = RB_NATIVETHREAD_LOCK_INIT;
915 struct cached_thread_entry *cached_thread_root;
916 
917 static rb_thread_t *
918 register_cached_thread_and_wait(void)
919 {
920  rb_nativethread_cond_t cond = RB_NATIVETHREAD_COND_INIT;
921  volatile rb_thread_t *th_area = 0;
922  struct timeval tv;
923  struct timespec ts;
924  struct cached_thread_entry *entry =
925  (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));
926 
927  if (entry == 0) {
928  return 0; /* failed -> terminate thread immediately */
929  }
930 
931  gettimeofday(&tv, 0);
932  ts.tv_sec = tv.tv_sec + 60;
933  ts.tv_nsec = tv.tv_usec * 1000;
934 
935  native_mutex_lock(&thread_cache_lock);
936  {
937  entry->th_area = &th_area;
938  entry->cond = &cond;
939  entry->next = cached_thread_root;
940  cached_thread_root = entry;
941 
942  native_cond_timedwait(&cond, &thread_cache_lock, &ts);
943 
944  {
945  struct cached_thread_entry *e, **prev = &cached_thread_root;
946 
947  while ((e = *prev) != 0) {
948  if (e == entry) {
949  *prev = e->next;
950  break;
951  }
952  prev = &e->next;
953  }
954  }
955 
956  free(entry); /* ok */
957  native_cond_destroy(&cond);
958  }
959  native_mutex_unlock(&thread_cache_lock);
960 
961  return (rb_thread_t *)th_area;
962 }
963 #endif
964 
965 static int
966 use_cached_thread(rb_thread_t *th)
967 {
968  int result = 0;
969 #if USE_THREAD_CACHE
970  struct cached_thread_entry *entry;
971 
972  if (cached_thread_root) {
973  native_mutex_lock(&thread_cache_lock);
974  entry = cached_thread_root;
975  {
976  if (cached_thread_root) {
977  cached_thread_root = entry->next;
978  *entry->th_area = th;
979  result = 1;
980  }
981  }
982  if (result) {
983  native_cond_signal(entry->cond);
984  }
985  native_mutex_unlock(&thread_cache_lock);
986  }
987 #endif
988  return result;
989 }
990 
991 static int
992 native_thread_create(rb_thread_t *th)
993 {
994  int err = 0;
995 
996  if (use_cached_thread(th)) {
997  thread_debug("create (use cached thread): %p\n", (void *)th);
998  }
999  else {
1000 #ifdef HAVE_PTHREAD_ATTR_INIT
1001  pthread_attr_t attr;
1002  pthread_attr_t *const attrp = &attr;
1003 #else
1004  pthread_attr_t *const attrp = NULL;
1005 #endif
1006  const size_t stack_size = th->vm->default_params.thread_machine_stack_size;
1007  const size_t space = space_size(stack_size);
1008 
1009  th->ec.machine.stack_maxsize = stack_size - space;
1010 #ifdef __ia64
1011  th->ec.machine.stack_maxsize /= 2;
1012  th->ec.machine.register_stack_maxsize = th->ec.machine.stack_maxsize;
1013 #endif
1014 
1015 #ifdef HAVE_PTHREAD_ATTR_INIT
1016  CHECK_ERR(pthread_attr_init(&attr));
1017 
1018 # ifdef PTHREAD_STACK_MIN
1019  thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
1020  CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
1021 # endif
1022 
1023 # ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
1024  CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
1025 # endif
1026  CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
1027 #endif
1028 #ifdef get_stack_of
1029  native_mutex_lock(&th->interrupt_lock);
1030 #endif
1031  err = pthread_create(&th->thread_id, attrp, thread_start_func_1, th);
1032 #ifdef get_stack_of
1033  if (!err) {
1034  get_stack_of(th->thread_id,
1035  &th->ec.machine.stack_start,
1036  &th->ec.machine.stack_maxsize);
1037  }
1038  native_mutex_unlock(&th->interrupt_lock);
1039 #endif
1040  thread_debug("create: %p (%d)\n", (void *)th, err);
1041  /* should be done in the created thread */
1042  fill_thread_id_str(th);
1043 #ifdef HAVE_PTHREAD_ATTR_INIT
1044  CHECK_ERR(pthread_attr_destroy(&attr));
1045 #endif
1046  }
1047  return err;
1048 }
1049 
1050 #if USE_SLEEPY_TIMER_THREAD
1051 static void
1052 native_thread_join(pthread_t th)
1053 {
1054  int err = pthread_join(th, 0);
1055  if (err) {
1056  rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
1057  }
1058 }
1059 #endif
1060 
1061 
1062 #if USE_NATIVE_THREAD_PRIORITY
1063 
1064 static void
1065 native_thread_apply_priority(rb_thread_t *th)
1066 {
1067 #if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
1068  struct sched_param sp;
1069  int policy;
1070  int priority = 0 - th->priority;
1071  int max, min;
1072  pthread_getschedparam(th->thread_id, &policy, &sp);
1073  max = sched_get_priority_max(policy);
1074  min = sched_get_priority_min(policy);
1075 
1076  if (min > priority) {
1077  priority = min;
1078  }
1079  else if (max < priority) {
1080  priority = max;
1081  }
1082 
1083  sp.sched_priority = priority;
1084  pthread_setschedparam(th->thread_id, policy, &sp);
1085 #else
1086  /* not touched */
1087 #endif
1088 }
1089 
1090 #endif /* USE_NATIVE_THREAD_PRIORITY */
1091 
1092 static int
1093 native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
1094 {
1095  return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
1096 }
1097 
1098 static void
1099 ubf_pthread_cond_signal(void *ptr)
1100 {
1101  rb_thread_t *th = (rb_thread_t *)ptr;
1102  thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
1103  native_cond_signal(&th->native_thread_data.sleep_cond);
1104 }
1105 
1106 static void
1107 native_sleep(rb_thread_t *th, struct timeval *timeout_tv)
1108 {
1109  struct timespec timeout;
1110  rb_nativethread_lock_t *lock = &th->interrupt_lock;
1112 
1113  if (timeout_tv) {
1114  struct timespec timeout_rel;
1115 
1116  timeout_rel.tv_sec = timeout_tv->tv_sec;
1117  timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;
1118 
1119  /* Solaris cond_timedwait() returns EINVAL if an argument is greater than
1120  * current_time + 100,000,000.  So cap the timeout at 100,000,000.  The
1121  * resulting early return is treated as a kind of spurious wakeup; callers
1122  * of native_sleep should cope with spurious wakeups anyway.
1123  *
1124  * See also [Bug #1341] [ruby-core:29702]
1125  * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
1126  */
1127  if (timeout_rel.tv_sec > 100000000) {
1128  timeout_rel.tv_sec = 100000000;
1129  timeout_rel.tv_nsec = 0;
1130  }
1131 
1132  timeout = native_cond_timeout(cond, timeout_rel);
1133  }
1134 
1135  GVL_UNLOCK_BEGIN();
1136  {
1137  native_mutex_lock(lock);
1138  th->unblock.func = ubf_pthread_cond_signal;
1139  th->unblock.arg = th;
1140 
1141  if (RUBY_VM_INTERRUPTED(th)) {
1142  /* interrupted; return immediately */
1143  thread_debug("native_sleep: interrupted before sleep\n");
1144  }
1145  else {
1146  if (!timeout_tv)
1147  native_cond_wait(cond, lock);
1148  else
1149  native_cond_timedwait(cond, lock, &timeout);
1150  }
1151  th->unblock.func = 0;
1152  th->unblock.arg = 0;
1153 
1154  native_mutex_unlock(lock);
1155  }
1156  GVL_UNLOCK_END();
1157 
1158  thread_debug("native_sleep done\n");
1159 }
1160 
1161 #ifdef USE_UBF_LIST
1162 static LIST_HEAD(ubf_list_head);
1163 
1164 /* The thread 'th' is registered as one that is trying to unblock. */
1165 static void
1166 register_ubf_list(rb_thread_t *th)
1167 {
1168  struct list_node *node = &th->native_thread_data.ubf_list;
1169 
1170  if (list_empty((struct list_head*)node)) {
1171  native_mutex_lock(&ubf_list_lock);
1172  list_add(&ubf_list_head, node);
1173  native_mutex_unlock(&ubf_list_lock);
1174  }
1175 }
1176 
1177 /* The thread 'th' is unblocked. It no longer needs to be registered. */
1178 static void
1179 unregister_ubf_list(rb_thread_t *th)
1180 {
1181  struct list_node *node = &th->native_thread_data.ubf_list;
1182 
1183  if (!list_empty((struct list_head*)node)) {
1184  native_mutex_lock(&ubf_list_lock);
1185  list_del_init(node);
1186  native_mutex_unlock(&ubf_list_lock);
1187  }
1188 }
1189 
1190 /*
1191  * Send a signal so that the target thread returns from its blocking syscall.
1192  * Almost any signal would do, but we chose SIGVTALRM.
1193  */
1194 static void
1195 ubf_wakeup_thread(rb_thread_t *th)
1196 {
1197  thread_debug("thread_wait_queue_wakeup (%"PRI_THREAD_ID")\n", thread_id_str(th));
1198  if (th)
1199  pthread_kill(th->thread_id, SIGVTALRM);
1200 }
1201 
1202 static void
1203 ubf_select(void *ptr)
1204 {
1205  rb_thread_t *th = (rb_thread_t *)ptr;
1206  register_ubf_list(th);
1207 
1208  /*
1209  * ubf_wakeup_thread() does not guarantee that the target thread wakes up,
1210  * so ubf_wakeup_thread() is called repeatedly until the target thread
1211  * leaves the ubf function.
1212  * On the other hand, we should not call rb_thread_wakeup_timer_thread()
1213  * when running on the timer thread, because that may cause endless wakeups.
1214  */
1215  if (!pthread_equal(pthread_self(), timer_thread.id))
1216  rb_thread_wakeup_timer_thread();
1217  ubf_wakeup_thread(th);
1218 }
1219 
1220 static int
1221 ubf_threads_empty(void)
1222 {
1223  return list_empty(&ubf_list_head);
1224 }
1225 
1226 static void
1227 ubf_wakeup_all_threads(void)
1228 {
1229  rb_thread_t *th;
1230 
1231  if (!ubf_threads_empty()) {
1232  native_mutex_lock(&ubf_list_lock);
1233  list_for_each(&ubf_list_head, th,
1234  native_thread_data.ubf_list) {
1235  ubf_wakeup_thread(th);
1236  }
1237  native_mutex_unlock(&ubf_list_lock);
1238  }
1239 }
1240 
1241 #else /* USE_UBF_LIST */
1242 #define register_ubf_list(th) (void)(th)
1243 #define unregister_ubf_list(th) (void)(th)
1244 #define ubf_select 0
1245 static void ubf_wakeup_all_threads(void) { return; }
1246 static int ubf_threads_empty(void) { return 1; }
1247 #endif /* USE_UBF_LIST */
1248 
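The UBF ("unblocking function") machinery above relies on the fact that a signal delivered with pthread_kill() can make a blocking system call in the target thread return early with EINTR (when the handler is installed without SA_RESTART), which gives that thread a chance to notice its interrupt flags; the SIGVTALRM handler installed in Init_native_thread() is deliberately a no-op. Below is a standalone sketch of that mechanism under those assumptions; poke_thread() and interruptible_read() are illustrative names, not functions from this file:

#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

/* Interrupt a thread that may be blocked in read()/poll()/select(). */
static void
poke_thread(pthread_t tid)
{
    pthread_kill(tid, SIGVTALRM);   /* handler is a no-op; the point is EINTR */
}

/* In the target thread: a blocking read that honors an interrupt flag. */
static ssize_t
interruptible_read(int fd, void *buf, size_t len, volatile sig_atomic_t *interrupted)
{
    for (;;) {
        ssize_t n = read(fd, buf, len);
        if (n >= 0) return n;
        if (errno != EINTR) return -1;   /* real error */
        if (*interrupted) return -1;     /* asked to stop; caller handles it */
        /* otherwise: spurious wakeup, retry the read */
    }
}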
1249 #define TT_DEBUG 0
1250 #define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
1251 
1252 /* 100ms. 10ms is too small for user level thread scheduling
1253  * on recent Linux (tested on 2.6.35)
1254  */
1255 #define TIME_QUANTUM_USEC (100 * 1000)
1256 
1257 #if USE_SLEEPY_TIMER_THREAD
1258 static struct {
1259  /*
1260  * Read end of each pipe is closed inside timer thread for shutdown
1261  * Write ends are closed by a normal Ruby thread during shutdown
1262  */
1263  int normal[2];
1264  int low[2];
1265 
1266  /* volatile for signal handler use: */
1267  volatile rb_pid_t owner_process;
1268  rb_atomic_t writing;
1269 } timer_thread_pipe = {
1270  {-1, -1},
1271  {-1, -1}, /* low priority */
1272 };
1273 
1274 NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
1275 static void
1276 async_bug_fd(const char *mesg, int errno_arg, int fd)
1277 {
1278  char buff[64];
1279  size_t n = strlcpy(buff, mesg, sizeof(buff));
1280  if (n < sizeof(buff)-3) {
1281  ruby_snprintf(buff, sizeof(buff)-n, "(%d)", fd);
1282  }
1283  rb_async_bug_errno(buff, errno_arg);
1284 }
1285 
1286 /* only use signal-safe system calls here */
1287 static void
1288 rb_thread_wakeup_timer_thread_fd(volatile int *fdp)
1289 {
1290  ssize_t result;
1291  int fd = *fdp; /* access fdp exactly once here and do not reread fdp */
1292 
1293  /* already opened */
1294  if (fd >= 0 && timer_thread_pipe.owner_process == getpid()) {
1295  static const char buff[1] = {'!'};
1296  retry:
1297  if ((result = write(fd, buff, 1)) <= 0) {
1298  int e = errno;
1299  switch (e) {
1300  case EINTR: goto retry;
1301  case EAGAIN:
1302 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1303  case EWOULDBLOCK:
1304 #endif
1305  break;
1306  default:
1307  async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
1308  }
1309  }
1310  if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
1311  }
1312  else {
1313  /* ignore wakeup */
1314  }
1315 }
1316 
1317 void
1318 rb_thread_wakeup_timer_thread(void)
1319 {
1320  /* must be safe inside sighandler, so no mutex */
1321  if (timer_thread_pipe.owner_process == getpid()) {
1322  ATOMIC_INC(timer_thread_pipe.writing);
1323  rb_thread_wakeup_timer_thread_fd(&timer_thread_pipe.normal[1]);
1324  ATOMIC_DEC(timer_thread_pipe.writing);
1325  }
1326 }
1327 
1328 static void
1329 rb_thread_wakeup_timer_thread_low(void)
1330 {
1331  if (timer_thread_pipe.owner_process == getpid()) {
1332  ATOMIC_INC(timer_thread_pipe.writing);
1333  rb_thread_wakeup_timer_thread_fd(&timer_thread_pipe.low[1]);
1334  ATOMIC_DEC(timer_thread_pipe.writing);
1335  }
1336 }
1337 
1338 /* VM-dependent API is not available for this function */
1339 static void
1340 consume_communication_pipe(int fd)
1341 {
1342 #define CCP_READ_BUFF_SIZE 1024
1343  /* the buffer can be shared because nobody examines its contents. */
1344  static char buff[CCP_READ_BUFF_SIZE];
1345  ssize_t result;
1346 
1347  while (1) {
1348  result = read(fd, buff, sizeof(buff));
1349  if (result == 0) {
1350  return;
1351  }
1352  else if (result < 0) {
1353  int e = errno;
1354  switch (e) {
1355  case EINTR:
1356  continue; /* retry */
1357  case EAGAIN:
1358 #if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1359  case EWOULDBLOCK:
1360 #endif
1361  return;
1362  default:
1363  async_bug_fd("consume_communication_pipe: read", e, fd);
1364  }
1365  }
1366  }
1367 }
1368 
1369 #define CLOSE_INVALIDATE(expr) \
1370  close_invalidate(&timer_thread_pipe.expr,"close_invalidate: "#expr)
1371 static void
1372 close_invalidate(volatile int *fdp, const char *msg)
1373 {
1374  int fd = *fdp; /* access fdp exactly once here and do not reread fdp */
1375 
1376  *fdp = -1;
1377  if (close(fd) < 0) {
1378  async_bug_fd(msg, errno, fd);
1379  }
1380 }
1381 
1382 static void
1383 set_nonblock(int fd)
1384 {
1385  int oflags;
1386  int err;
1387 
1388  oflags = fcntl(fd, F_GETFL);
1389  if (oflags == -1)
1390  rb_sys_fail(0);
1391  oflags |= O_NONBLOCK;
1392  err = fcntl(fd, F_SETFL, oflags);
1393  if (err == -1)
1394  rb_sys_fail(0);
1395 }
1396 
1397 static int
1398 setup_communication_pipe_internal(int pipes[2])
1399 {
1400  int err;
1401 
1402  err = rb_cloexec_pipe(pipes);
1403  if (err != 0) {
1404  rb_warn("Failed to create communication pipe for timer thread: %s",
1405  strerror(errno));
1406  return -1;
1407  }
1408  rb_update_max_fd(pipes[0]);
1409  rb_update_max_fd(pipes[1]);
1410  set_nonblock(pipes[0]);
1411  set_nonblock(pipes[1]);
1412  return 0;
1413 }
1414 
1415 /* communication pipe with timer thread and signal handler */
1416 static int
1417 setup_communication_pipe(void)
1418 {
1419  VM_ASSERT(timer_thread_pipe.owner_process == 0);
1420  VM_ASSERT(timer_thread_pipe.normal[0] == -1);
1421  VM_ASSERT(timer_thread_pipe.normal[1] == -1);
1422  VM_ASSERT(timer_thread_pipe.low[0] == -1);
1423  VM_ASSERT(timer_thread_pipe.low[1] == -1);
1424 
1425  if (setup_communication_pipe_internal(timer_thread_pipe.normal) < 0) {
1426  return errno;
1427  }
1428  if (setup_communication_pipe_internal(timer_thread_pipe.low) < 0) {
1429  int e = errno;
1430  CLOSE_INVALIDATE(normal[0]);
1431  CLOSE_INVALIDATE(normal[1]);
1432  return e;
1433  }
1434 
1435  return 0;
1436 }
1437 
1444 static inline void
1445 timer_thread_sleep(rb_global_vm_lock_t* gvl)
1446 {
1447  int result;
1448  int need_polling;
1449  struct pollfd pollfds[2];
1450 
1451  pollfds[0].fd = timer_thread_pipe.normal[0];
1452  pollfds[0].events = POLLIN;
1453  pollfds[1].fd = timer_thread_pipe.low[0];
1454  pollfds[1].events = POLLIN;
1455 
1456  need_polling = !ubf_threads_empty();
1457 
1458  if (gvl->waiting > 0 || need_polling) {
1459  /* polling (TIME_QUANTUM_USEC usec) */
1460  result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
1461  }
1462  else {
1463  /* wait (infinite) */
1464  result = poll(pollfds, numberof(pollfds), -1);
1465  }
1466 
1467  if (result == 0) {
1468  /* maybe timeout */
1469  }
1470  else if (result > 0) {
1471  consume_communication_pipe(timer_thread_pipe.normal[0]);
1472  consume_communication_pipe(timer_thread_pipe.low[0]);
1473  }
1474  else { /* result < 0 */
1475  int e = errno;
1476  switch (e) {
1477  case EBADF:
1478  case EINVAL:
1479  case ENOMEM: /* from Linux man */
1480  case EFAULT: /* from FreeBSD man */
1481  rb_async_bug_errno("thread_timer: select", e);
1482  default:
1483  /* ignore */;
1484  }
1485  }
1486 }
1487 
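rb_thread_wakeup_timer_thread_fd(), consume_communication_pipe() and timer_thread_sleep() form a classic self-pipe wakeup: a writer, possibly running in a signal handler, pokes the timer thread by writing a single byte into a non-blocking pipe, poll() then reports the read end readable, and the timer thread drains it. Below is a minimal sketch of the same idea outside of Ruby; wake_fd() and drain_fd() are illustrative names, and a production signal handler would also save and restore errno:

#include <errno.h>
#include <unistd.h>

/* Writer side: wake the poller by writing one byte (write(2) is async-signal-safe). */
static void
wake_fd(int fd)
{
    static const char one = '!';
    while (write(fd, &one, 1) < 0) {
        if (errno != EINTR) break;  /* EAGAIN: pipe full, a wakeup is already pending */
    }
}

/* Reader side: called after poll() reports the read end readable. */
static void
drain_fd(int fd)
{
    char buf[1024];
    for (;;) {
        ssize_t n = read(fd, buf, sizeof(buf));
        if (n > 0) continue;                    /* keep draining */
        if (n < 0 && errno == EINTR) continue;  /* interrupted: retry */
        return;                                 /* 0 (EOF) or EAGAIN: nothing left */
    }
}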
1488 #else /* USE_SLEEPY_TIMER_THREAD */
1489 # define PER_NANO 1000000000
1490 void rb_thread_wakeup_timer_thread(void) {}
1491 static void rb_thread_wakeup_timer_thread_low(void) {}
1492 
1493 static rb_nativethread_lock_t timer_thread_lock;
1494 static rb_nativethread_cond_t timer_thread_cond;
1495 
1496 static inline void
1497 timer_thread_sleep(rb_global_vm_lock_t* unused)
1498 {
1499  struct timespec ts;
1500  ts.tv_sec = 0;
1501  ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
1502  ts = native_cond_timeout(&timer_thread_cond, ts);
1503 
1504  native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
1505 }
1506 #endif /* USE_SLEEPY_TIMER_THREAD */
1507 
1508 #if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
1509 # define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
1510 #endif
1511 
1512 static void
1513 native_set_thread_name(rb_thread_t *th)
1514 {
1515 #ifdef SET_CURRENT_THREAD_NAME
1516  if (!th->first_func && th->first_proc) {
1517  VALUE loc;
1518  if (!NIL_P(loc = th->name)) {
1519  SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
1520  }
1521  else if (!NIL_P(loc = rb_proc_location(th->first_proc))) {
1522  const VALUE *ptr = RARRAY_CONST_PTR(loc); /* [ String, Integer ] */
1523  char *name, *p;
1524  char buf[16];
1525  size_t len;
1526  int n;
1527 
1528  name = RSTRING_PTR(ptr[0]);
1529  p = strrchr(name, '/'); /* show only the basename of the path. */
1530  if (p && p[1])
1531  name = p + 1;
1532 
1533  n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(ptr[1]));
1534  rb_gc_force_recycle(loc); /* acts as a GC guard, too */
1535 
1536  len = (size_t)n;
1537  if (len >= sizeof(buf)) {
1538  buf[sizeof(buf)-2] = '*';
1539  buf[sizeof(buf)-1] = '\0';
1540  }
1541  SET_CURRENT_THREAD_NAME(buf);
1542  }
1543  }
1544 #endif
1545 }
1546 
1547 static VALUE
1548 native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
1549 {
1550 #ifdef SET_ANOTHER_THREAD_NAME
1551  const char *s = "";
1552  if (!NIL_P(name)) s = RSTRING_PTR(name);
1553  SET_ANOTHER_THREAD_NAME(thread_id, s);
1554 #endif
1555  return name;
1556 }
1557 
1558 static void *
1559 thread_timer(void *p)
1560 {
1561  rb_global_vm_lock_t *gvl = p;
1562 
1563  if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");
1564 
1565 #ifdef SET_CURRENT_THREAD_NAME
1566  SET_CURRENT_THREAD_NAME("ruby-timer-thr");
1567 #endif
1568 
1569 #if !USE_SLEEPY_TIMER_THREAD
1570  native_mutex_initialize(&timer_thread_lock);
1571  native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
1572  native_mutex_lock(&timer_thread_lock);
1573 #endif
1574  while (system_working > 0) {
1575 
1576  /* timer function */
1577  ubf_wakeup_all_threads();
1578  timer_thread_function(0);
1579 
1580  if (TT_DEBUG) WRITE_CONST(2, "tick\n");
1581 
1582  /* wait */
1583  timer_thread_sleep(gvl);
1584  }
1585 #if USE_SLEEPY_TIMER_THREAD
1586  CLOSE_INVALIDATE(normal[0]);
1587  CLOSE_INVALIDATE(low[0]);
1588 #else
1589  native_mutex_unlock(&timer_thread_lock);
1590  native_cond_destroy(&timer_thread_cond);
1591  native_mutex_destroy(&timer_thread_lock);
1592 #endif
1593 
1594  if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
1595  return NULL;
1596 }
1597 
1598 static void
1599 rb_thread_create_timer_thread(void)
1600 {
1601  if (!timer_thread.created) {
1602  int err;
1603 #ifdef HAVE_PTHREAD_ATTR_INIT
1604  pthread_attr_t attr;
1605  rb_vm_t *vm = GET_VM();
1606 
1607  err = pthread_attr_init(&attr);
1608  if (err != 0) {
1609  rb_warn("pthread_attr_init failed for timer: %s, scheduling broken",
1610  strerror(err));
1611  return;
1612  }
1613 # ifdef PTHREAD_STACK_MIN
1614  {
1615  const size_t min_size = (4096 * 4);
1616  /* Allocate at least 16KB (4 pages) of machine stack for the timer
1617  * thread.  On FreeBSD 8.2 AMD64, a machine stack overflow occurs
1618  * when only PTHREAD_STACK_MIN is used.
1619  */
1620  enum {
1621  needs_more_stack =
1622 #if defined HAVE_VALGRIND_MEMCHECK_H && defined __APPLE__
1623  1
1624 #else
1625  THREAD_DEBUG != 0
1626 #endif
1627  };
1628  size_t stack_size = PTHREAD_STACK_MIN; /* may be dynamic, get only once */
1629  if (stack_size < min_size) stack_size = min_size;
1630  if (needs_more_stack) stack_size += BUFSIZ;
1631  pthread_attr_setstacksize(&attr, stack_size);
1632  }
1633 # endif
1634 #endif
1635 
1636 #if USE_SLEEPY_TIMER_THREAD
1637  err = setup_communication_pipe();
1638  if (err != 0) {
1639  rb_warn("pipe creation failed for timer: %s, scheduling broken",
1640  strerror(err));
1641  return;
1642  }
1643 #endif /* USE_SLEEPY_TIMER_THREAD */
1644 
1645  /* create timer thread */
1646  if (timer_thread.created) {
1647  rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
1648  }
1649 #ifdef HAVE_PTHREAD_ATTR_INIT
1650  err = pthread_create(&timer_thread.id, &attr, thread_timer, &vm->gvl);
1651  pthread_attr_destroy(&attr);
1652 
1653  if (err == EINVAL) {
1654  /*
1655  * Even if we are careful with our own stack use in thread_timer(),
1656  * any third-party libraries (eg libkqueue) which rely on __thread
1657  * storage can cause small stack sizes to fail. So lets hope the
1658  * default stack size is enough for them:
1659  */
1660  err = pthread_create(&timer_thread.id, NULL, thread_timer, &vm->gvl);
1661  }
1662 #else
1663  err = pthread_create(&timer_thread.id, NULL, thread_timer, &vm->gvl);
1664 #endif
1665  if (err != 0) {
1666  rb_warn("pthread_create failed for timer: %s, scheduling broken",
1667  strerror(err));
1668 #if USE_SLEEPY_TIMER_THREAD
1669  CLOSE_INVALIDATE(normal[0]);
1670  CLOSE_INVALIDATE(normal[1]);
1671  CLOSE_INVALIDATE(low[0]);
1672  CLOSE_INVALIDATE(low[1]);
1673 #endif
1674  return;
1675  }
1676 
1677  /* validate pipe on this process */
1678  timer_thread_pipe.owner_process = getpid();
1679  timer_thread.created = 1;
1680  }
1681 }
1682 
1683 static int
1684 native_stop_timer_thread(void)
1685 {
1686  int stopped;
1687  stopped = --system_working <= 0;
1688 
1689  if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
1690 #if USE_SLEEPY_TIMER_THREAD
1691  if (stopped) {
1692  /* prevent wakeups from signal handler ASAP */
1693  timer_thread_pipe.owner_process = 0;
1694 
1695  /*
1696  * however, the above was not enough: the FD may already be
1697  * captured and in the middle of a write while we are running,
1698  * so wait for that to finish:
1699  */
1700  while (ATOMIC_CAS(timer_thread_pipe.writing, (rb_atomic_t)0, 0)) {
1701  native_thread_yield();
1702  }
1703 
1704  /* stop writing ends of pipes so timer thread notices EOF */
1705  CLOSE_INVALIDATE(normal[1]);
1706  CLOSE_INVALIDATE(low[1]);
1707 
1708  /* timer thread will stop looping when system_working <= 0: */
1709  native_thread_join(timer_thread.id);
1710 
1711  /* timer thread will close the read end on exit: */
1712  VM_ASSERT(timer_thread_pipe.normal[0] == -1);
1713  VM_ASSERT(timer_thread_pipe.low[0] == -1);
1714 
1715  if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
1716  timer_thread.created = 0;
1717  }
1718 #endif
1719  return stopped;
1720 }
1721 
1722 static void
1723 native_reset_timer_thread(void)
1724 {
1725  if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
1726 }
1727 
1728 #ifdef HAVE_SIGALTSTACK
1729 int
1730 ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
1731 {
1732  void *base;
1733  size_t size;
1734  const size_t water_mark = 1024 * 1024;
1735  STACK_GROW_DIR_DETECTION;
1736 
1737 #ifdef STACKADDR_AVAILABLE
1738  if (get_stack(&base, &size) == 0) {
1739 # ifdef __APPLE__
1740  if (pthread_equal(th->thread_id, native_main_thread.id)) {
1741  struct rlimit rlim;
1742  if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
1743  size = (size_t)rlim.rlim_cur;
1744  }
1745  }
1746 # endif
1747  base = (char *)base + STACK_DIR_UPPER(+size, -size);
1748  }
1749  else
1750 #endif
1751  if (th) {
1752  size = th->ec.machine.stack_maxsize;
1753  base = (char *)th->ec.machine.stack_start - STACK_DIR_UPPER(0, size);
1754  }
1755  else {
1756  return 0;
1757  }
1758  size /= RUBY_STACK_SPACE_RATIO;
1759  if (size > water_mark) size = water_mark;
1760  if (IS_STACK_DIR_UPPER()) {
1761  if (size > ~(size_t)base+1) size = ~(size_t)base+1;
1762  if (addr > base && addr <= (void *)((char *)base + size)) return 1;
1763  }
1764  else {
1765  if (size > (size_t)base) size = (size_t)base;
1766  if (addr > (void *)((char *)base - size) && addr <= base) return 1;
1767  }
1768  return 0;
1769 }
1770 #endif
1771 
1772 int
1773 rb_reserved_fd_p(int fd)
1774 {
1775 #if USE_SLEEPY_TIMER_THREAD
1776  if ((fd == timer_thread_pipe.normal[0] ||
1777  fd == timer_thread_pipe.normal[1] ||
1778  fd == timer_thread_pipe.low[0] ||
1779  fd == timer_thread_pipe.low[1]) &&
1780  timer_thread_pipe.owner_process == getpid()) { /* async-signal-safe */
1781  return 1;
1782  }
1783  else {
1784  return 0;
1785  }
1786 #else
1787  return 0;
1788 #endif
1789 }
1790 
1791 rb_nativethread_id_t
1792 rb_nativethread_self(void)
1793 {
1794  return pthread_self();
1795 }
1796 
1797 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */