#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#ifdef HAVE_THR_STKSEGMENT
/* ... */
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#if defined(__native_client__) && defined(NACL_NEWLIB)
/* ... */
#endif
#if defined(HAVE_SYS_TIME_H)
/* ... */
#endif
#if defined(__HAIKU__)
#include <kernel/OS.h>
#endif

static void native_mutex_lock(rb_nativethread_lock_t *lock);
static void native_mutex_unlock(rb_nativethread_lock_t *lock);
static int native_mutex_trylock(rb_nativethread_lock_t *lock);
static void native_mutex_initialize(rb_nativethread_lock_t *lock);
static void native_mutex_destroy(rb_nativethread_lock_t *lock);
static void rb_thread_wakeup_timer_thread_low(void);

#define TIMER_THREAD_CREATED_P() (timer_thread.created != 0)

#define RB_CONDATTR_CLOCK_MONOTONIC 1

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && defined(HAVE_CLOCKID_T) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME) && defined(HAVE_PTHREAD_CONDATTR_INIT)
#define USE_MONOTONIC_COND 1
#else
#define USE_MONOTONIC_COND 0
#endif

#if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && \
    defined(F_SETFL) && defined(O_NONBLOCK) && !defined(__native_client__)
# define USE_SLEEPY_TIMER_THREAD 1
#else
# define USE_SLEEPY_TIMER_THREAD 0
#endif

/* inside gvl_acquire_common(): a contended acquire pokes the (possibly sleeping) timer thread */
    rb_thread_wakeup_timer_thread_low();
/* gvl_acquire(vm, th) */
    native_mutex_lock(&vm->gvl.lock);
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);

static void
gvl_release_common(rb_vm_t *vm)
{
    /* ... */
    native_cond_signal(&vm->gvl.cond);
}

/* gvl_release(vm) */
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    native_mutex_unlock(&vm->gvl.lock);

/* gvl_yield(vm, th): give other threads a chance to take the GVL */
    native_mutex_lock(&vm->gvl.lock);
    gvl_release_common(vm);
    /* ... either wait for another thread to take the GVL, or: ... */
    native_mutex_unlock(&vm->gvl.lock);
    /* ... yield the CPU ... */
    native_mutex_lock(&vm->gvl.lock);
    /* ... */
    gvl_acquire_common(vm);
    native_mutex_unlock(&vm->gvl.lock);

/* gvl_init(vm) */
    native_mutex_initialize(&vm->gvl.lock);
    native_cond_initialize(&vm->gvl.cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_cond_initialize(&vm->gvl.switch_cond, RB_CONDATTR_CLOCK_MONOTONIC);

/* gvl_destroy(vm) */
    native_cond_destroy(&vm->gvl.cond);
    native_mutex_destroy(&vm->gvl.lock);
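/*
 * Illustrative sketch, not part of the listing above: the gvl_* fragments boil
 * down to "one ownership flag guarded by a mutex, plus a condition variable
 * that waiters block on".  A minimal standalone version of that pattern with
 * plain pthreads might look like the following; all names (toy_gvl_t,
 * toy_gvl_acquire, toy_gvl_release) are invented for illustration and are not
 * Ruby's.
 */
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             acquired;   /* 1 while some thread holds the lock */
    int             waiting;    /* number of blocked acquirers */
} toy_gvl_t;

static void toy_gvl_acquire(toy_gvl_t *gvl)
{
    pthread_mutex_lock(&gvl->lock);
    while (gvl->acquired) {                 /* wait until the holder releases */
        gvl->waiting++;
        pthread_cond_wait(&gvl->cond, &gvl->lock);
        gvl->waiting--;
    }
    gvl->acquired = 1;
    pthread_mutex_unlock(&gvl->lock);
}

static void toy_gvl_release(toy_gvl_t *gvl)
{
    pthread_mutex_lock(&gvl->lock);
    gvl->acquired = 0;
    if (gvl->waiting > 0)
        pthread_cond_signal(&gvl->cond);    /* wake one waiter, as gvl_release_common does */
    pthread_mutex_unlock(&gvl->lock);
}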
#if defined(HAVE_WORKING_FORK)
/* ... */
#endif

#define NATIVE_MUTEX_LOCK_DEBUG 0

static void
mutex_debug(const char *msg, void *lock)
{
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        int r;
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;

        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
    }
}

static void
native_mutex_lock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {
        /* ... */
    }
}

static void
native_mutex_unlock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {
        /* ... */
    }
}

static int
native_mutex_trylock(pthread_mutex_t *lock)
{
    int r;
    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {
        /* ... EBUSY is expected when the mutex is already held ... */
    }
    return 0;
}

static void
native_mutex_initialize(pthread_mutex_t *lock)
{
    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);
    /* ... */
}

static void
native_mutex_destroy(pthread_mutex_t *lock)
{
    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);
    /* ... */
}
/* native_cond_initialize(cond, flags) */
#ifdef HAVE_PTHREAD_COND_INIT
    int r;
# if USE_MONOTONIC_COND
    pthread_condattr_t attr;

    pthread_condattr_init(&attr);
    /* ... */
    if (flags & RB_CONDATTR_CLOCK_MONOTONIC) {
        /* ... */
    }
    r = pthread_cond_init(&cond->cond, &attr);
    pthread_condattr_destroy(&attr);
# else
    r = pthread_cond_init(&cond->cond, NULL);
# endif
    /* ... */
#endif

/* native_cond_destroy(cond) */
#ifdef HAVE_PTHREAD_COND_INIT
    int r = pthread_cond_destroy(&cond->cond);
    /* ... */
#endif

/* native_cond_signal(cond): retry while the call reports EAGAIN */
    do {
        r = pthread_cond_signal(&cond->cond);
    } while (r == EAGAIN);

/* native_cond_broadcast(cond) */
    do {
        r = pthread_cond_broadcast(&cond->cond);
    } while (r == EAGAIN);

/* native_cond_wait(cond, mutex) */
    int r = pthread_cond_wait(&cond->cond, mutex);

/* native_cond_timedwait(cond, mutex, ts): retry if interrupted */
    do {
        r = pthread_cond_timedwait(&cond->cond, mutex, ts);
    } while (r == EINTR);
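/*
 * Illustrative sketch, not part of the listing above: the retry loops on EAGAIN
 * and EINTR are the usual defensive pattern around the pthread condvar calls
 * (POSIX forbids EINTR here, but some older systems returned it anyway).  A
 * generic timed-wait helper in the same spirit; timedwait_retry is an invented
 * name:
 */
#include <errno.h>
#include <pthread.h>
#include <time.h>

static int
timedwait_retry(pthread_cond_t *cond, pthread_mutex_t *mutex,
                const struct timespec *abs_deadline)
{
    int r;
    do {
        /* retries until the wait either succeeds (0) or the absolute
         * deadline passes (ETIMEDOUT) */
        r = pthread_cond_timedwait(cond, mutex, abs_deadline);
    } while (r == EINTR);
    return r;
}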
/* native_cond_timeout(cond, timeout_rel): turn a relative timeout into the
 * absolute deadline expected by pthread_cond_timedwait() */
#if USE_MONOTONIC_COND
    /* ... use clock_gettime() when the condvar uses CLOCK_MONOTONIC ... */
#endif
    /* fall back to gettimeofday() for CLOCK_REALTIME condvars */
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

#if USE_MONOTONIC_COND
    /* ... */
#endif
    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;
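/*
 * Illustrative sketch, not part of the listing above: the same
 * relative-to-absolute deadline arithmetic in isolation, for CLOCK_REALTIME
 * only and without error handling.  abs_deadline is an invented name.
 */
#include <time.h>

static struct timespec
abs_deadline(struct timespec rel)
{
    struct timespec now, abs;

    clock_gettime(CLOCK_REALTIME, &now);
    abs.tv_sec  = now.tv_sec + rel.tv_sec;
    abs.tv_nsec = now.tv_nsec + rel.tv_nsec;
    if (abs.tv_nsec >= 1000*1000*1000) {   /* carry: tv_nsec must stay below 1e9 */
        abs.tv_sec++;
        abs.tv_nsec -= 1000*1000*1000;
    }
    /* the code above additionally clamps tv_sec to TIMET_MAX if the addition
     * overflowed time_t */
    return abs;
}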
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop pthread_cleanup_pop

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#else
#define native_thread_yield() ((void)0)
#endif

#if defined(SIGVTALRM) && !defined(__CYGWIN__)
#define USE_UBF_LIST 1
static rb_nativethread_lock_t ubf_list_lock;
#endif

static pthread_key_t ruby_native_thread_key;

/* ruby_thread_from_native(void) */
    return pthread_getspecific(ruby_native_thread_key);

/* ruby_thread_set_native(th) */
    return pthread_setspecific(ruby_native_thread_key, th) == 0;

/* Init_native_thread(void) */
    pthread_key_create(&ruby_native_thread_key, NULL);
    /* ... */
    native_thread_init(th);
    /* ... */
    native_mutex_initialize(&ubf_list_lock);
    /* ... */
#ifndef __native_client__
    posix_signal(SIGVTALRM, null_func);
#endif

/* native_thread_init(th) */
    native_cond_initialize(&nd->sleep_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    ruby_thread_set_native(th);
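/*
 * Illustrative sketch, not part of the listing above:
 * ruby_thread_from_native()/ruby_thread_set_native() are a thin wrapper over
 * pthread thread-specific storage.  The same mechanism in isolation; the
 * ctx_* names are invented.
 */
#include <pthread.h>

static pthread_key_t current_ctx_key;

static void  ctx_key_init(void) { pthread_key_create(&current_ctx_key, NULL); }
static void  ctx_set(void *ctx) { pthread_setspecific(current_ctx_key, ctx); }
static void *ctx_get(void)      { return pthread_getspecific(current_ctx_key); }

/* Usage: after ctx_key_init(), each thread calls ctx_set() once with its own
 * context pointer and can later recover it anywhere via ctx_get(), without
 * threading the pointer through every call. */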
#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 0
#endif

#if USE_THREAD_CACHE
static rb_thread_t *register_cached_thread_and_wait(void);
#endif

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#elif defined __ia64 && defined _HPUX_SOURCE
#include <sys/dyntune.h>
#define STACKADDR_AVAILABLE 1
/* ia64/HP-UX: the standard pthread_attr stack queries are unusable here, so a
 * replacement based on gettune() and the current stack pointer is used. */
#undef PTHREAD_STACK_MIN
#define HAVE_PTHREAD_ATTR_GET_NP 1
#undef HAVE_PTHREAD_ATTR_GETSTACK
#define pthread_attr_get_np(thid, attr) 0
static int
hpux_attr_getstackaddr(const pthread_attr_t *attr, void **addr)
{
    size_t size;
    static uint64_t pagesize;

    if (!pagesize) {
        if (gettune("vps_pagesize", &pagesize)) {
            /* ... */
        }
    }
    pthread_attr_getstacksize(attr, &size);
    *addr = (void *)((size_t)((char *)_Asm_get_sp() - size) & ~(pagesize - 1));
    return 0;
}
#define pthread_attr_getstackaddr(attr, addr) hpux_attr_getstackaddr(attr, addr)
#endif

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
# else
#   define MAINSTACKADDR_AVAILABLE 0
# endif
#endif
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
#endif

#ifdef STACKADDR_AVAILABLE
/* get_stack(addr, size): report the base address and size of the calling
 * thread's stack, using whichever platform interface is available */
static int
get_stack(void **addr, size_t *size)
{
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
    pthread_attr_t attr;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    /* ... */
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    /* ... */
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
# else
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# endif
    /* ... */
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
    /* ... */
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
# else
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
# endif
    /* ... */
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
                                  /* ... */));
    *addr = thinfo.__pi_stackaddr;
    /* ... */
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    /* ... */
#elif defined __HAIKU__
    thread_info info;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    /* ... */
#else
#error STACKADDR_AVAILABLE is defined but not implemented.
#endif
    return 0;
}
#endif
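/*
 * Illustrative sketch, not part of the listing above: on glibc/Linux the
 * HAVE_PTHREAD_GETATTR_NP branch of get_stack() corresponds to roughly this
 * standalone query of the calling thread's stack.  query_own_stack is an
 * invented name; error handling is reduced to returning the pthread error code.
 */
#define _GNU_SOURCE
#include <pthread.h>

static int
query_own_stack(void **addr, size_t *size)
{
    pthread_attr_t attr;
    int err = pthread_getattr_np(pthread_self(), &attr);   /* glibc extension */
    if (err) return err;
    err = pthread_attr_getstack(&attr, addr, size);        /* base + total size */
    pthread_attr_destroy(&attr);
    return err;
}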
/* the native main thread and its known stack bounds */
static struct {
    rb_nativethread_id_t id;
    size_t stack_maxsize;
    VALUE *stack_start;
    /* ... */
    VALUE *register_stack_start;
    /* ... */
} native_main_thread;

#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
#endif

enum {
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5
};

static size_t
space_size(size_t stack_size)
{
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
    }
    else {
        return space_size;
    }
}
static __attribute__((noinline)) void
reserve_stack(volatile char *limit, size_t size)
{
    /* ... */
# error needs alloca()
    /* ... */
    struct rlimit rl;
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000}; /* for -fstack-check */

    /* ... */
    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
        return;

    if (size < stack_check_margin) return;
    size -= stack_check_margin;

    /* ... when the stack grows upward: */
        const volatile char *end = buf + sizeof(buf);
        /* ... */
        size_t sz = limit - end;
        /* ... */
    /* ... when the stack grows downward: */
        size_t sz = buf - limit;
        /* ... */
#else
# define reserve_stack(limit, size) ((void)(limit), (void)(size))
#endif

#undef ruby_init_stack
/* ruby_init_stack(addr, ...) */
    native_main_thread.id = pthread_self();

    if (!native_main_thread.register_stack_start ||
        (VALUE*)bsp < native_main_thread.register_stack_start) {
        native_main_thread.register_stack_start = (VALUE*)bsp;
    }

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
    /* ... */
    if (get_main_stack(&stackaddr, &size) == 0) {
        native_main_thread.stack_maxsize = size;
        native_main_thread.stack_start = stackaddr;
        reserve_stack(stackaddr, size);
        /* ... */
    }
#endif
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
#else
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
    }
#endif

#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
# endif
    size_t size = PTHREAD_STACK_DEFAULT;
#endif
    int pagesize = getpagesize();
    /* ... */
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
        size = (size_t)rlim.rlim_cur;
    }
    addr = native_main_thread.stack_start;
    /* usable space rounded to whole pages; which formula applies depends on
       whether the stack grows upward or downward: */
    space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
    /* ... */
    space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;

    native_main_thread.stack_maxsize = space;
#endif

#if MAINSTACKADDR_AVAILABLE
    /* ... */
#endif
    /* if addr falls outside the recorded stack range, forget the estimate: */
    start = native_main_thread.stack_start;
    end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
    /* ... or, when the stack grows downward: */
    start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
    end = native_main_thread.stack_start;

    if ((void *)addr < start || (void *)addr > end) {
        /* out of range */
        native_main_thread.stack_start = (VALUE *)addr;
        native_main_thread.stack_maxsize = 0;
    }
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

/* native_thread_init_stack(th) */
    rb_nativethread_id_t curr = pthread_self();

    if (pthread_equal(curr, native_main_thread.id)) {
        /* main thread: reuse the bounds recorded by ruby_init_stack() */
        /* ... */
    }
    else {
#ifdef STACKADDR_AVAILABLE
        if (get_stack(&start, &size) == 0) {
            /* ... */
        }
#elif defined get_stack_of
        /* ... */
#endif
    }
    /* ... */
    th->ec.machine.register_stack_start = native_main_thread.register_stack_start;
    /* ... */

#define USE_NATIVE_THREAD_INIT 1

/* thread_start_func_1(th_ptr): entry point of every native thread */
#if !defined USE_NATIVE_THREAD_INIT
    /* ... */
#endif
    /* ... */
#if defined USE_NATIVE_THREAD_INIT
    native_thread_init_stack(th);
#endif
    native_thread_init(th);
    /* ... run the Ruby thread ... */
#if defined USE_NATIVE_THREAD_INIT
    /* ... */
#endif
#if USE_THREAD_CACHE
    /* when the Ruby thread ends, park this native thread in the cache: */
    if ((th = register_cached_thread_and_wait()) != 0) {
        /* ... */
    }
#endif
#if USE_THREAD_CACHE
struct cached_thread_entry {
    /* ... */
    struct cached_thread_entry *next;
};

struct cached_thread_entry *cached_thread_root;

/* register_cached_thread_and_wait(void): park the calling native thread for up
 * to 60 seconds, waiting for a new Ruby thread to reuse it */
static rb_thread_t *
register_cached_thread_and_wait(void)
{
    /* ... */
    struct cached_thread_entry *entry =
        (struct cached_thread_entry *)malloc(sizeof(struct cached_thread_entry));

    ts.tv_sec = tv.tv_sec + 60;
    ts.tv_nsec = tv.tv_usec * 1000;

    native_mutex_lock(&thread_cache_lock);
    entry->th_area = &th_area;
    /* ... */
    entry->next = cached_thread_root;
    cached_thread_root = entry;

    native_cond_timedwait(&cond, &thread_cache_lock, &ts);

    /* woken or timed out: unlink this entry from the cache list */
    struct cached_thread_entry *e, **prev = &cached_thread_root;
    while ((e = *prev) != 0) {
        /* ... */
    }

    native_cond_destroy(&cond);
    /* ... */
    native_mutex_unlock(&thread_cache_lock);
    /* ... */
}

/* use_cached_thread(th): hand `th` to a parked native thread, if any */
    struct cached_thread_entry *entry;

    if (cached_thread_root) {
        native_mutex_lock(&thread_cache_lock);
        entry = cached_thread_root;
        if (cached_thread_root) {
            cached_thread_root = entry->next;
            *entry->th_area = th;
            /* ... */
        }
        /* ... */
        native_cond_signal(entry->cond);
        /* ... */
        native_mutex_unlock(&thread_cache_lock);
    }
#endif
/* native_thread_create(th) */
    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
    }
    else {
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;
        pthread_attr_t *const attrp = &attr;
#else
        pthread_attr_t *const attrp = NULL;
#endif
        /* ... */
        const size_t space = space_size(stack_size);

        /* ... */
#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
        CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
# endif

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
        CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
# endif
        CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
#endif
        /* ... */
        err = pthread_create(&th->thread_id, attrp, thread_start_func_1, th);
        /* ... */
#ifdef HAVE_PTHREAD_ATTR_INIT
        CHECK_ERR(pthread_attr_destroy(&attr));
#endif
    }
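/*
 * Illustrative sketch, not part of the listing above: native_thread_create()
 * builds a pthread_attr_t, pins the stack size, and detaches the thread before
 * pthread_create().  The same sequence in isolation; spawn_detached is an
 * invented name, and error handling is reduced to returning the pthread error
 * code.
 */
#include <pthread.h>
#include <limits.h>

static int
spawn_detached(void *(*fn)(void *), void *arg, size_t stack_size)
{
    pthread_t tid;
    pthread_attr_t attr;
    int err = pthread_attr_init(&attr);
    if (err) return err;
#ifdef PTHREAD_STACK_MIN
    if (stack_size < PTHREAD_STACK_MIN) stack_size = PTHREAD_STACK_MIN;
#endif
    pthread_attr_setstacksize(&attr, stack_size);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    err = pthread_create(&tid, &attr, fn, arg);   /* detached: never joined */
    pthread_attr_destroy(&attr);
    return err;
}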
#if USE_SLEEPY_TIMER_THREAD
static void
native_thread_join(pthread_t th)
{
    int err = pthread_join(th, 0);
    /* ... */
}
#endif

#if USE_NATIVE_THREAD_PRIORITY
/* native_thread_apply_priority(th) */
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    /* ... */
    pthread_getschedparam(th->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
        priority = min;
    }
    else if (max < priority) {
        priority = max;
    }

    sp.sched_priority = priority;
    pthread_setschedparam(th->thread_id, policy, &sp);
#endif
#endif

/* native_fd_select(n, readfds, writefds, exceptfds, timeout, th) */
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
static void
ubf_pthread_cond_signal(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
    /* signal the thread's sleep_cond to interrupt native_sleep() */
    /* ... */
}

/* native_sleep(th, timeout_tv) */
    if (timeout_tv) {
        /* ... */
        timeout_rel.tv_nsec = timeout_tv->tv_usec * 1000;

        /* cap the relative timeout so the absolute deadline cannot overflow */
        if (timeout_rel.tv_sec > 100000000) {
            timeout_rel.tv_sec = 100000000;
            timeout_rel.tv_nsec = 0;
        }

        timeout = native_cond_timeout(cond, timeout_rel);
    }

    /* ... */
    native_mutex_lock(lock);
    /* skip sleeping if the thread was interrupted before the lock was taken: */
        thread_debug("native_sleep: interrupted before sleep\n");
    /* otherwise block, without or with a deadline: */
        native_cond_wait(cond, lock);
        /* ... */
        native_cond_timedwait(cond, lock, &timeout);
    /* ... */
    native_mutex_unlock(lock);
#if USE_UBF_LIST
static LIST_HEAD(ubf_list_head);

/* register_ubf_list(th): node is &th->native_thread_data.ubf_list */
    if (list_empty((struct list_head*)node)) {
        native_mutex_lock(&ubf_list_lock);
        list_add(&ubf_list_head, node);
        native_mutex_unlock(&ubf_list_lock);
    }

/* unregister_ubf_list(th) */
    if (!list_empty((struct list_head*)node)) {
        native_mutex_lock(&ubf_list_lock);
        list_del_init(node);
        native_mutex_unlock(&ubf_list_lock);
    }

/* ubf_select(ptr): unblocking function for threads blocked in select()/poll() */
static void
ubf_select(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    register_ubf_list(th);
    /* ... */
    if (!pthread_equal(pthread_self(), timer_thread.id)) {
        /* ... wake the timer thread, but never from the timer thread itself ... */
    }
    ubf_wakeup_thread(th);
}

static int
ubf_threads_empty(void)
{
    return list_empty(&ubf_list_head);
}

static void
ubf_wakeup_all_threads(void)
{
    /* ... */
    if (!ubf_threads_empty()) {
        native_mutex_lock(&ubf_list_lock);
        list_for_each(&ubf_list_head, th,
                      native_thread_data.ubf_list) {
            ubf_wakeup_thread(th);
        }
        native_mutex_unlock(&ubf_list_lock);
    }
}

#else /* USE_UBF_LIST */
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0
static void ubf_wakeup_all_threads(void) { return; }
static int ubf_threads_empty(void) { return 1; }
#endif /* USE_UBF_LIST */
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

/* 100ms time slice */
#define TIME_QUANTUM_USEC (100 * 1000)

#if USE_SLEEPY_TIMER_THREAD
static struct {
    int normal[2];     /* wakeup pipe for normal-priority pokes */
    int low[2];        /* wakeup pipe for low-priority pokes */
    volatile rb_pid_t owner_process;
} timer_thread_pipe = {
    {-1, -1},
    {-1, -1}, /* low priority */
};

NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static void
async_bug_fd(const char *mesg, int errno_arg, int fd)
{
    size_t n = strlcpy(buff, mesg, sizeof(buff));
    if (n < sizeof(buff)-3) {
        /* ... append "(fd)" to the message ... */
    }
    /* ... */
}

/* rb_thread_wakeup_timer_thread_fd(fdp): async-signal-safe wakeup via a one-byte
 * write to the non-blocking pipe */
    if (fd >= 0 && timer_thread_pipe.owner_process == getpid()) {
        static const char buff[1] = {'!'};
      retry:
        if ((result = write(fd, buff, 1)) <= 0) {
            switch (e) {
              case EINTR: goto retry;
              case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
              case EWOULDBLOCK:
#endif
                break;   /* pipe already full: the sleeper will wake anyway */
              default:
                async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
            }
        }
        if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
    }

/* rb_thread_wakeup_timer_thread(void) */
    if (timer_thread_pipe.owner_process == getpid()) {
        rb_thread_wakeup_timer_thread_fd(&timer_thread_pipe.normal[1]);
    }

/* rb_thread_wakeup_timer_thread_low(void) */
    if (timer_thread_pipe.owner_process == getpid()) {
        rb_thread_wakeup_timer_thread_fd(&timer_thread_pipe.low[1]);
    }
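/*
 * Illustrative sketch, not part of the listing above:
 * rb_thread_wakeup_timer_thread_fd() is the classic "self-pipe" wakeup, i.e.
 * poke a sleeping poll() loop by writing one byte to a non-blocking pipe.  The
 * core of that pattern in isolation; poke is an invented name.
 */
#include <errno.h>
#include <unistd.h>

static void
poke(int wakeup_write_fd)
{
    static const char buf[1] = {'!'};
    ssize_t n;
  retry:
    n = write(wakeup_write_fd, buf, 1);
    if (n < 0) {
        if (errno == EINTR) goto retry;  /* interrupted: try again */
        /* EAGAIN/EWOULDBLOCK: the pipe is already full, so the sleeper is
         * guaranteed to wake up anyway; nothing more to do */
    }
}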
/* consume_communication_pipe(fd): drain pending wakeup bytes from the pipe */
#define CCP_READ_BUFF_SIZE 1024
    static char buff[CCP_READ_BUFF_SIZE];
    /* ... */
    result = read(fd, buff, sizeof(buff));
    /* ... */
    else if (result < 0) {
        /* EINTR/EAGAIN are expected on the non-blocking pipe; anything else is a bug */
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
        /* ... */
#endif
        async_bug_fd("consume_communication_pipe: read", e, fd);
    }

#define CLOSE_INVALIDATE(expr) \
    close_invalidate(&timer_thread_pipe.expr,"close_invalidate: "#expr)
static void
close_invalidate(volatile int *fdp, const char *msg)
{
    /* ... */
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
    }
}

static void
set_nonblock(int fd)
{
    /* ... */
    oflags = fcntl(fd, F_GETFL);
    /* ... set O_NONBLOCK via F_SETFL ... */
}

/* setup_communication_pipe_internal(pipes): create one pipe and make both ends
 * non-blocking */
static int
setup_communication_pipe_internal(int pipes[2])
{
    /* ... create the pipe ... */
        rb_warn("Failed to create communication pipe for timer thread: %s",
                /* ... */);
    /* ... */
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
    /* ... */
}

static int
setup_communication_pipe(void)
{
    VM_ASSERT(timer_thread_pipe.owner_process == 0);
    VM_ASSERT(timer_thread_pipe.normal[0] == -1);
    VM_ASSERT(timer_thread_pipe.normal[1] == -1);
    VM_ASSERT(timer_thread_pipe.low[0] == -1);
    VM_ASSERT(timer_thread_pipe.low[1] == -1);

    if (setup_communication_pipe_internal(timer_thread_pipe.normal) < 0) {
        /* ... */
    }
    if (setup_communication_pipe_internal(timer_thread_pipe.low) < 0) {
        /* ... */
        CLOSE_INVALIDATE(normal[0]);
        CLOSE_INVALIDATE(normal[1]);
        /* ... */
    }
    /* ... */
}
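/*
 * Illustrative sketch, not part of the listing above: set_nonblock() uses the
 * F_GETFL/F_SETFL pair to put a pipe end into non-blocking mode, which is what
 * lets the writer treat a full pipe as "already woken".  Standalone version;
 * make_nonblocking is an invented name.
 */
#include <fcntl.h>

static int
make_nonblocking(int fd)
{
    int flags = fcntl(fd, F_GETFL);
    if (flags == -1) return -1;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);  /* keep the existing flags */
}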
/* timer_thread_sleep(gvl): how the timer thread waits between ticks */
    struct pollfd pollfds[2];

    pollfds[0].fd = timer_thread_pipe.normal[0];
    pollfds[0].events = POLLIN;
    pollfds[1].fd = timer_thread_pipe.low[0];
    pollfds[1].events = POLLIN;

    need_polling = !ubf_threads_empty();

    if (gvl->waiting > 0 || need_polling) {
        /* periodic tick: wake at least every TIME_QUANTUM_USEC */
        result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
    }
    else {
        /* nothing needs periodic attention: sleep until poked through a pipe */
        result = poll(pollfds, numberof(pollfds), -1);
    }

    /* ... */
    else if (result > 0) {
        consume_communication_pipe(timer_thread_pipe.normal[0]);
        consume_communication_pipe(timer_thread_pipe.low[0]);
    }
    /* ... */

#else /* !USE_SLEEPY_TIMER_THREAD */
# define PER_NANO 1000000000
static void rb_thread_wakeup_timer_thread_low(void) {}

static rb_nativethread_lock_t timer_thread_lock;

/* timer_thread_sleep(): condvar-based fallback, one tick per TIME_QUANTUM_USEC */
    ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
    ts = native_cond_timeout(&timer_thread_cond, ts);

    native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
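/*
 * Illustrative sketch, not part of the listing above: the read side of the
 * self-pipe pattern used by the sleepy timer_thread_sleep().  The thread parks
 * in poll() on the pipe's read end, either with a finite tick or indefinitely,
 * and drains the pipe when poked.  wait_for_poke and TICK_MS are invented
 * names (TICK_MS stands in for TIME_QUANTUM_USEC/1000); the fd is assumed to
 * be non-blocking.
 */
#include <poll.h>
#include <unistd.h>

#define TICK_MS 100

static void
wait_for_poke(int wakeup_read_fd, int need_periodic_tick)
{
    struct pollfd pfd = { .fd = wakeup_read_fd, .events = POLLIN };
    int n = poll(&pfd, 1, need_periodic_tick ? TICK_MS : -1);  /* -1 = sleep forever */
    if (n > 0) {
        char buf[64];
        while (read(wakeup_read_fd, buf, sizeof(buf)) > 0)     /* drain all pending pokes */
            ;
    }
}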
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#endif

/* native_set_thread_name(th) */
#ifdef SET_CURRENT_THREAD_NAME
    /* ... build a short name into buf, marking truncation with a trailing '*': */
    if (len >= sizeof(buf)) {
        buf[sizeof(buf)-2] = '*';
        buf[sizeof(buf)-1] = '\0';
    }
    SET_CURRENT_THREAD_NAME(buf);
#endif

static void
native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
{
#ifdef SET_ANOTHER_THREAD_NAME
    /* ... */
    SET_ANOTHER_THREAD_NAME(thread_id, s);
#endif
}

/* thread_timer(p): body of the timer thread; p is &vm->gvl */
static void *
thread_timer(void *p)
{
    if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");

#ifdef SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME("ruby-timer-thr");
#endif

#if !USE_SLEEPY_TIMER_THREAD
    native_mutex_initialize(&timer_thread_lock);
    native_cond_initialize(&timer_thread_cond, RB_CONDATTR_CLOCK_MONOTONIC);
    native_mutex_lock(&timer_thread_lock);
#endif

    while (system_working > 0) {
        /* ... */
        ubf_wakeup_all_threads();
        timer_thread_function(0);
        /* ... */
        timer_thread_sleep(gvl);
    }

#if USE_SLEEPY_TIMER_THREAD
    CLOSE_INVALIDATE(normal[0]);
    CLOSE_INVALIDATE(low[0]);
#else
    native_mutex_unlock(&timer_thread_lock);
    native_cond_destroy(&timer_thread_cond);
    native_mutex_destroy(&timer_thread_lock);
#endif

    if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
    return NULL;
}
/* rb_thread_create_timer_thread(void) */
    if (!timer_thread.created) {
#ifdef HAVE_PTHREAD_ATTR_INIT
        pthread_attr_t attr;

        err = pthread_attr_init(&attr);
        if (err != 0) {
            rb_warn("pthread_attr_init failed for timer: %s, scheduling broken",
                    /* ... */);
            return;
        }
# ifdef PTHREAD_STACK_MIN
        const size_t min_size = (4096 * 4);
        /* ... */
#if defined HAVE_VALGRIND_MEMCHECK_H && defined __APPLE__
        /* ... running under valgrind on macOS needs extra stack ... */
#endif
        size_t stack_size = PTHREAD_STACK_MIN;
        if (stack_size < min_size) stack_size = min_size;
        if (needs_more_stack) stack_size += BUFSIZ;
        pthread_attr_setstacksize(&attr, stack_size);
# endif
#endif

#if USE_SLEEPY_TIMER_THREAD
        err = setup_communication_pipe();
        if (err != 0) {
            rb_warn("pipe creation failed for timer: %s, scheduling broken",
                    /* ... */);
            return;
        }
#endif

        /* create the timer thread */
        if (timer_thread.created) {
            rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
        }
#ifdef HAVE_PTHREAD_ATTR_INIT
        err = pthread_create(&timer_thread.id, &attr, thread_timer, &vm->gvl);
        pthread_attr_destroy(&attr);

        if (err == EINVAL) {
            /* the requested stack size was rejected: retry with the default */
            /* ... */
            err = pthread_create(&timer_thread.id, NULL, thread_timer, &vm->gvl);
        }
#else
        err = pthread_create(&timer_thread.id, NULL, thread_timer, &vm->gvl);
#endif
        if (err != 0) {
            rb_warn("pthread_create failed for timer: %s, scheduling broken",
                    /* ... */);
#if USE_SLEEPY_TIMER_THREAD
            CLOSE_INVALIDATE(normal[0]);
            CLOSE_INVALIDATE(normal[1]);
            CLOSE_INVALIDATE(low[0]);
            CLOSE_INVALIDATE(low[1]);
#endif
            return;
        }
        /* ... */
        timer_thread_pipe.owner_process = getpid();
        timer_thread.created = 1;
    }
static int
native_stop_timer_thread(void)
{
    int stopped;

    stopped = --system_working <= 0;
    /* ... */
    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
#if USE_SLEEPY_TIMER_THREAD
    /* ... */
    timer_thread_pipe.owner_process = 0;   /* ignore wakeup requests from now on */
    /* ... */
    native_thread_yield();
    /* ... */
    /* close the write ends; the timer thread closes the read ends itself */
    CLOSE_INVALIDATE(normal[1]);
    CLOSE_INVALIDATE(low[1]);
#endif
    /* ... */
    native_thread_join(timer_thread.id);

    VM_ASSERT(timer_thread_pipe.normal[0] == -1);
    VM_ASSERT(timer_thread_pipe.low[0] == -1);

    if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
    timer_thread.created = 0;
    /* ... */
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (TT_DEBUG) fprintf(stderr, "reset timer thread\n");
}
#ifdef HAVE_SIGALTSTACK
/* ruby_stack_overflowed_p(th, addr): does the faulting address addr fall
 * within the overflow watermark region of th's machine stack? */
int
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
{
    const size_t water_mark = 1024 * 1024;
    /* ... */
#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
        /* ... */
        if (pthread_equal(th->thread_id, native_main_thread.id)) {
            /* the main thread's stack may grow up to the rlimit */
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
            }
        }
        /* ... */
    }
#endif
    /* ... */
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
    /* when the stack grows upward: */
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
    /* when the stack grows downward: */
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
    /* ... */
}
#endif

/* rb_reserved_fd_p(fd): the timer thread's pipe fds must not be closed or
 * reused by Ruby code */
#if USE_SLEEPY_TIMER_THREAD
    if ((fd == timer_thread_pipe.normal[0] ||
         fd == timer_thread_pipe.normal[1] ||
         fd == timer_thread_pipe.low[0] ||
         fd == timer_thread_pipe.low[1]) &&
        timer_thread_pipe.owner_process == getpid()) {
        return 1;
    }
    /* ... */
#endif

rb_nativethread_id_t
rb_nativethread_self(void)
{
    return pthread_self();
}