Ruby  2.5.0dev(2017-10-22revision60238)
cont.c
Go to the documentation of this file.
1 /**********************************************************************
2 
3  cont.c -
4 
5  $Author$
6  created at: Thu May 23 09:03:43 2007
7 
8  Copyright (C) 2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #include "internal.h"
13 #include "vm_core.h"
14 #include "gc.h"
15 #include "eval_intern.h"
16 
17 /* FIBER_USE_NATIVE enables Fiber performance improvement using system
18  * dependent method such as make/setcontext on POSIX system or
19  * CreateFiber() API on Windows.
 * This hack makes Fiber context switching faster (2x or more).
 * However, it decreases the maximum number of Fibers. For example, on a
 * 32bit POSIX OS, only ten or twenty thousand Fibers can be created.
23  *
 * Details are reported in the paper "A Fast Fiber Implementation for Ruby 1.9"
 * in Proc. of the 51st Programming Symposium, pp.21--28 (2010) (in Japanese).
26  */
27 
28 #if !defined(FIBER_USE_NATIVE)
29 # if defined(HAVE_GETCONTEXT) && defined(HAVE_SETCONTEXT)
30 # if 0
31 # elif defined(__NetBSD__)
32 /* On our experience, NetBSD doesn't support using setcontext() and pthread
33  * simultaneously. This is because pthread_self(), TLS and other information
34  * are represented by stack pointer (higher bits of stack pointer).
35  * TODO: check such constraint on configure.
36  */
37 # define FIBER_USE_NATIVE 0
38 # elif defined(__sun)
/* Disabled on Solaris because resuming any Fiber caused a SEGV, for some
 * reason.
 */
41 # define FIBER_USE_NATIVE 0
42 # elif defined(__ia64)
43 /* At least, Linux/ia64's getcontext(3) doesn't save register window.
44  */
45 # define FIBER_USE_NATIVE 0
46 # elif defined(__GNU__)
47 /* GNU/Hurd doesn't fully support getcontext, setcontext, makecontext
48  * and swapcontext functions. Disabling their usage till support is
49  * implemented. More info at
50  * http://darnassus.sceen.net/~hurd-web/open_issues/glibc/#getcontext
51  */
52 # define FIBER_USE_NATIVE 0
53 # else
54 # define FIBER_USE_NATIVE 1
55 # endif
56 # elif defined(_WIN32)
57 # define FIBER_USE_NATIVE 1
58 # endif
59 #endif
60 #if !defined(FIBER_USE_NATIVE)
61 #define FIBER_USE_NATIVE 0
62 #endif
63 
64 #if FIBER_USE_NATIVE
65 #ifndef _WIN32
66 #include <unistd.h>
67 #include <sys/mman.h>
68 #include <ucontext.h>
69 #endif
70 #define RB_PAGE_SIZE (pagesize)
71 #define RB_PAGE_MASK (~(RB_PAGE_SIZE - 1))
72 static long pagesize;
73 #endif /*FIBER_USE_NATIVE*/
74 
75 #define CAPTURE_JUST_VALID_VM_STACK 1
76 
81 };
82 
85 #ifdef CAPTURE_JUST_VALID_VM_STACK
86  size_t slen; /* length of stack (head of th->ec.vm_stack) */
87  size_t clen; /* length of control frames (tail of th->ec.vm_stack) */
88 #endif
89 };
90 
91 typedef struct rb_context_struct {
92  enum context_type type;
93  int argc;
94  VALUE self;
96 
97  struct cont_saved_vm_stack saved_vm_stack;
98 
99  struct {
102  size_t stack_size;
103 #ifdef __ia64
104  VALUE *register_stack;
105  VALUE *register_stack_src;
106  int register_stack_size;
107 #endif
108  } machine;
114 } rb_context_t;
115 
116 
117 /*
118  * Fiber status:
119  * [Fiber.new] ------> FIBER_CREATED
120  * | [Fiber#resume]
121  * v
122  * +--> FIBER_RESUMED ----+
123  * [Fiber#resume] | | [Fiber.yield] |
124  * | v |
125  * +-- FIBER_SUSPENDED | [Terminate]
126  * |
127  * FIBER_TERMINATED <-+
128  */
134 };
135 
/* Status predicates over rb_fiber_t::status.
 * FIBER_RUNNABLE_P: a fiber that may legally be resumed — either never
 * started yet (CREATED) or currently yielded (SUSPENDED). */
#define FIBER_CREATED_P(fib) ((fib)->status == FIBER_CREATED)
#define FIBER_RESUMED_P(fib) ((fib)->status == FIBER_RESUMED)
#define FIBER_SUSPENDED_P(fib) ((fib)->status == FIBER_SUSPENDED)
#define FIBER_TERMINATED_P(fib) ((fib)->status == FIBER_TERMINATED)
#define FIBER_RUNNABLE_P(fib) (FIBER_CREATED_P(fib) || FIBER_SUSPENDED_P(fib))
141 
#if FIBER_USE_NATIVE && !defined(_WIN32)
/* Small LIFO cache of mmap'ed fiber machine stacks so that creating and
 * destroying fibers does not always hit mmap/munmap.
 * machine_stack_cache_index is the number of cached entries currently held.
 * terminated_machine_stack holds the stack of the most recently terminated
 * fiber (it cannot be unmapped while we are still running on it). */
#define MAX_MACHINE_STACK_CACHE 10
static int machine_stack_cache_index = 0;
typedef struct machine_stack_cache_struct {
    void *ptr;       /* base address of the mapping */
    size_t size;     /* NOTE(review): stored unit (bytes vs VALUEs) not visible here — confirm against the code that fills the cache */
} machine_stack_cache_t;
static machine_stack_cache_t machine_stack_cache[MAX_MACHINE_STACK_CACHE];
static machine_stack_cache_t terminated_machine_stack;
#endif
152 
157  const enum fiber_status status;
158  /* If a fiber invokes "transfer",
159  * then this fiber can't "resume" any more after that.
160  * You shouldn't mix "transfer" and "resume".
161  */
163 
164 #if FIBER_USE_NATIVE
165 #ifdef _WIN32
166  void *fib_handle;
167 #else
168  ucontext_t context;
169  /* Because context.uc_stack.ss_sp and context.uc_stack.ss_size
170  * are not necessarily valid after makecontext() or swapcontext(),
171  * they are saved in these variables for later use.
172  */
173  void *ss_sp;
174  size_t ss_size;
175 #endif
176 #endif
177 };
178 
179 static const char *
180 fiber_status_name(enum fiber_status s)
181 {
182  switch (s) {
183  case FIBER_CREATED: return "created";
184  case FIBER_RESUMED: return "resumed";
185  case FIBER_SUSPENDED: return "suspended";
186  case FIBER_TERMINATED: return "terminated";
187  }
188  VM_UNREACHABLE(fiber_status_name);
189  return NULL;
190 }
191 
192 static void
193 fiber_status_set(const rb_fiber_t *fib, enum fiber_status s)
194 {
195  if (0) fprintf(stderr, "fib: %p, status: %s -> %s\n", fib, fiber_status_name(fib->status), fiber_status_name(s));
197  VM_ASSERT(fib->status != s);
198  *((enum fiber_status *)&fib->status) = s;
199 }
200 
/* Forward declarations: the TypedData type tables are defined after the
 * mark/free/memsize callbacks below. */
static const rb_data_type_t cont_data_type, fiber_data_type;
static VALUE rb_cContinuation;
static VALUE rb_cFiber;
static VALUE rb_eFiberError;

/* Extract the rb_context_t pointer from a Continuation object. */
#define GetContPtr(obj, ptr) \
  TypedData_Get_Struct((obj), rb_context_t, &cont_data_type, (ptr))

/* Extract the rb_fiber_t pointer from a Fiber object; raises FiberError
 * when the object was allocated but never initialized (DATA_PTR == NULL). */
#define GetFiberPtr(obj, ptr) do {\
  TypedData_Get_Struct((obj), rb_fiber_t, &fiber_data_type, (ptr)); \
  if (!(ptr)) rb_raise(rb_eFiberError, "uninitialized fiber"); \
} while (0)

NOINLINE(static VALUE cont_capture(volatile int *volatile stat));

/* A thread whose ec.tag is NULL is not executing Ruby code, so capturing
 * or resuming a context on it would be meaningless. */
#define THREAD_MUST_BE_RUNNING(th) do { \
  if (!(th)->ec.tag) rb_raise(rb_eThreadError, "not running thread"); \
  } while (0)
219 
/* Return the Thread object (as a VALUE) that this continuation/fiber was
 * created on; stored at creation time in cont_init(). */
static VALUE
cont_thread_value(const rb_context_t *cont)
{
    return cont->thread_value;
}
225 
226 static void
227 cont_mark(void *ptr)
228 {
229  rb_context_t *cont = ptr;
230 
231  RUBY_MARK_ENTER("cont");
232  rb_gc_mark(cont->value);
233 
235  rb_gc_mark(cont_thread_value(cont));
236 
237  if (cont->saved_vm_stack.ptr) {
238 #ifdef CAPTURE_JUST_VALID_VM_STACK
240  cont->saved_vm_stack.ptr + cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
241 #else
243  cont->saved_vm_stack.ptr, cont->saved_ec.stack_size);
244 #endif
245  }
246 
247  if (cont->machine.stack) {
248  if (cont->type == CONTINUATION_CONTEXT) {
249  /* cont */
251  cont->machine.stack + cont->machine.stack_size);
252  }
253  else {
254  /* fiber */
255  const rb_thread_t *th = rb_thread_ptr(cont_thread_value(cont));
256  const rb_fiber_t *fib = (rb_fiber_t*)cont;
257 
258  if ((th->ec.fiber != fib) && !FIBER_TERMINATED_P(fib)) {
260  cont->machine.stack + cont->machine.stack_size);
261  }
262  }
263  }
264 #ifdef __ia64
265  if (cont->machine.register_stack) {
266  rb_gc_mark_locations(cont->machine.register_stack,
267  cont->machine.register_stack + cont->machine.register_stack_size);
268  }
269 #endif
270 
271  RUBY_MARK_LEAVE("cont");
272 }
273 
274 static void
275 cont_free(void *ptr)
276 {
277  rb_context_t *cont = ptr;
278 
279  RUBY_FREE_ENTER("cont");
281 #if FIBER_USE_NATIVE
282  if (cont->type == CONTINUATION_CONTEXT) {
283  /* cont */
284  ruby_xfree(cont->ensure_array);
286  }
287  else {
288  /* fiber */
289  const rb_fiber_t *fib = (rb_fiber_t*)cont;
290  const rb_thread_t *const th = GET_THREAD();
291 #ifdef _WIN32
292  if (th && th->ec.fiber != fib && cont->type != ROOT_FIBER_CONTEXT) {
293  /* don't delete root fiber handle */
294  if (fib->fib_handle) {
295  DeleteFiber(fib->fib_handle);
296  }
297  }
298 #else /* not WIN32 */
299  if (th && th->ec.fiber != fib) {
300  if (fib->ss_sp) {
301  if (cont->type == ROOT_FIBER_CONTEXT) {
302  rb_bug("Illegal root fiber parameter");
303  }
304  munmap((void*)fib->ss_sp, fib->ss_size);
305  }
306  }
307  else {
308  /* It may reached here when finalize */
309  /* TODO examine whether it is a bug */
310  /* rb_bug("cont_free: release self"); */
311  }
312 #endif
313  }
314 #else /* not FIBER_USE_NATIVE */
315  ruby_xfree(cont->ensure_array);
317 #endif
318 #ifdef __ia64
319  RUBY_FREE_UNLESS_NULL(cont->machine.register_stack);
320 #endif
322 
323  /* free rb_cont_t or rb_fiber_t */
324  ruby_xfree(ptr);
325  RUBY_FREE_LEAVE("cont");
326 }
327 
328 static size_t
329 cont_memsize(const void *ptr)
330 {
331  const rb_context_t *cont = ptr;
332  size_t size = 0;
333 
334  size = sizeof(*cont);
335  if (cont->saved_vm_stack.ptr) {
336 #ifdef CAPTURE_JUST_VALID_VM_STACK
337  size_t n = (cont->saved_vm_stack.slen + cont->saved_vm_stack.clen);
338 #else
339  size_t n = cont->saved_ec.vm_stack_size;
340 #endif
341  size += n * sizeof(*cont->saved_vm_stack.ptr);
342  }
343 
344  if (cont->machine.stack) {
345  size += cont->machine.stack_size * sizeof(*cont->machine.stack);
346  }
347 #ifdef __ia64
348  if (cont->machine.register_stack) {
349  size += cont->machine.register_stack_size * sizeof(*cont->machine.register_stack);
350  }
351 #endif
352  return size;
353 }
354 
355 static void
356 fiber_verify(const rb_fiber_t *fib)
357 {
358 #if VM_CHECK_MODE > 0
359  switch (fib->status) {
360  case FIBER_RESUMED:
362  break;
363  case FIBER_SUSPENDED:
365  break;
366  case FIBER_CREATED:
367  case FIBER_TERMINATED:
368  /* TODO */
369  break;
370  default:
371  VM_UNREACHABLE(fiber_verify);
372  }
373 #endif
374 }
375 
376 void
378 {
379  if (fib)
380  rb_gc_mark(fib->cont.self);
381 }
382 
383 static void
384 fiber_mark(void *ptr)
385 {
386  rb_fiber_t *fib = ptr;
387  RUBY_MARK_ENTER("cont");
388  fiber_verify(fib);
389  rb_gc_mark(fib->first_proc);
390  rb_fiber_mark_self(fib->prev);
391  cont_mark(&fib->cont);
392  RUBY_MARK_LEAVE("cont");
393 }
394 
395 static void
396 fiber_free(void *ptr)
397 {
398  rb_fiber_t *fib = ptr;
399  RUBY_FREE_ENTER("fiber");
400  if (fib->cont.type != ROOT_FIBER_CONTEXT &&
401  fib->cont.saved_ec.local_storage) {
403  }
404 
405  cont_free(&fib->cont);
406  RUBY_FREE_LEAVE("fiber");
407 }
408 
409 static size_t
410 fiber_memsize(const void *ptr)
411 {
412  const rb_fiber_t *fib = ptr;
413  size_t size = 0;
414 
415  size = sizeof(*fib);
416  if (fib->cont.type != ROOT_FIBER_CONTEXT &&
417  fib->cont.saved_ec.local_storage != NULL) {
418  size += st_memsize(fib->cont.saved_ec.local_storage);
419  }
420  size += cont_memsize(&fib->cont);
421  return size;
422 }
423 
424 VALUE
426 {
427  if (rb_typeddata_is_kind_of(obj, &fiber_data_type)) {
428  return Qtrue;
429  }
430  else {
431  return Qfalse;
432  }
433 }
434 
435 static void
436 cont_save_machine_stack(rb_thread_t *th, rb_context_t *cont)
437 {
438  size_t size;
439 
441 #ifdef __ia64
442  th->machine.register_stack_end = rb_ia64_bsp();
443 #endif
444 
445  if (th->ec.machine.stack_start > th->ec.machine.stack_end) {
446  size = cont->machine.stack_size = th->ec.machine.stack_start - th->ec.machine.stack_end;
447  cont->machine.stack_src = th->ec.machine.stack_end;
448  }
449  else {
450  size = cont->machine.stack_size = th->ec.machine.stack_end - th->ec.machine.stack_start;
451  cont->machine.stack_src = th->ec.machine.stack_start;
452  }
453 
454  if (cont->machine.stack) {
455  REALLOC_N(cont->machine.stack, VALUE, size);
456  }
457  else {
458  cont->machine.stack = ALLOC_N(VALUE, size);
459  }
460 
462  MEMCPY(cont->machine.stack, cont->machine.stack_src, VALUE, size);
463 
464 #ifdef __ia64
465  rb_ia64_flushrs();
466  size = cont->machine.register_stack_size = th->machine.register_stack_end - th->machine.register_stack_start;
467  cont->machine.register_stack_src = th->machine.register_stack_start;
468  if (cont->machine.register_stack) {
469  REALLOC_N(cont->machine.register_stack, VALUE, size);
470  }
471  else {
472  cont->machine.register_stack = ALLOC_N(VALUE, size);
473  }
474 
475  MEMCPY(cont->machine.register_stack, cont->machine.register_stack_src, VALUE, size);
476 #endif
477 }
478 
479 static const rb_data_type_t cont_data_type = {
480  "continuation",
481  {cont_mark, cont_free, cont_memsize,},
483 };
484 
485 static inline void
486 cont_save_thread(rb_context_t *cont, rb_thread_t *th)
487 {
488  rb_execution_context_t *sec = &cont->saved_ec;
489 
491 
492  /* save thread context */
493  *sec = th->ec;
494 
495  /* saved_thread->machine.stack_end should be NULL */
496  /* because it may happen GC afterward */
497  sec->machine.stack_end = NULL;
498 
499 #ifdef __ia64
500  sec->machine.register_stack_start = NULL;
501  sec->machine.register_stack_end = NULL;
502 #endif
503 }
504 
505 static void
506 cont_init(rb_context_t *cont, rb_thread_t *th)
507 {
508  /* save thread context */
509  cont_save_thread(cont, th);
510  cont->thread_value = th->self;
511  cont->saved_ec.local_storage = NULL;
514 }
515 
516 static rb_context_t *
517 cont_new(VALUE klass)
518 {
520  volatile VALUE contval;
521  rb_thread_t *th = GET_THREAD();
522 
524  contval = TypedData_Make_Struct(klass, rb_context_t, &cont_data_type, cont);
525  cont->self = contval;
526  cont_init(cont, th);
527  return cont;
528 }
529 
530 static VALUE
531 cont_capture(volatile int *volatile stat)
532 {
533  rb_context_t *volatile cont;
534  rb_thread_t *th = GET_THREAD();
535  volatile VALUE contval;
536  rb_execution_context_t *ec = &th->ec;
537 
540  cont = cont_new(rb_cContinuation);
541  contval = cont->self;
542 
543 #ifdef CAPTURE_JUST_VALID_VM_STACK
544  cont->saved_vm_stack.slen = ec->cfp->sp - ec->vm_stack;
545  cont->saved_vm_stack.clen = ec->vm_stack + ec->vm_stack_size - (VALUE*)ec->cfp;
549  (VALUE*)ec->cfp, VALUE, cont->saved_vm_stack.clen);
550 #else
553 #endif
554  cont->saved_ec.vm_stack = NULL;
555 
556  cont_save_machine_stack(th, cont);
557 
558  /* backup ensure_list to array for search in another context */
559  {
560  rb_ensure_list_t *p;
561  int size = 0;
562  rb_ensure_entry_t *entry;
563  for (p=th->ec.ensure_list; p; p=p->next)
564  size++;
565  entry = cont->ensure_array = ALLOC_N(rb_ensure_entry_t,size+1);
566  for (p=th->ec.ensure_list; p; p=p->next) {
567  if (!p->entry.marker)
568  p->entry.marker = rb_ary_tmp_new(0); /* dummy object */
569  *entry++ = p->entry;
570  }
571  entry->marker = 0;
572  }
573 
574  if (ruby_setjmp(cont->jmpbuf)) {
575  VALUE value;
576 
577  VAR_INITIALIZED(cont);
578  value = cont->value;
579  if (cont->argc == -1) rb_exc_raise(value);
580  cont->value = Qnil;
581  *stat = 1;
582  return value;
583  }
584  else {
585  *stat = 0;
586  return contval;
587  }
588 }
589 
/* Install `fib`'s saved execution context as the thread's current one.
 * The fiber's saved vm_stack pointer is cleared afterwards: ownership of
 * the stack transfers to th->ec, and keeping both references would confuse
 * marking and freeing. Statement order matters here. */
static inline void
fiber_restore_thread(rb_thread_t *th, rb_fiber_t *fib)
{
    th->ec = fib->cont.saved_ec;
    fib->cont.saved_ec.vm_stack = NULL;

    VM_ASSERT(th->ec.vm_stack != NULL);
}
598 
599 static inline void
600 cont_restore_thread(rb_context_t *cont)
601 {
602  rb_thread_t *th = GET_THREAD();
603 
604  /* restore thread context */
605  if (cont->type == CONTINUATION_CONTEXT) {
606  /* continuation */
607  rb_execution_context_t *sec = &cont->saved_ec;
608  const rb_fiber_t *fib;
609 
610  fib = th->ec.fiber = sec->fiber;
611  if (fib == NULL) fib = th->root_fiber;
612 
613  if (fib && fib->cont.saved_ec.vm_stack) {
615  th->ec.vm_stack = fib->cont.saved_ec.vm_stack;
616  }
617 #ifdef CAPTURE_JUST_VALID_VM_STACK
619  MEMCPY(th->ec.vm_stack + sec->vm_stack_size - cont->saved_vm_stack.clen,
621 #else
623 #endif
624 
625  /* other members of ec */
626  th->ec.cfp = sec->cfp;
627  th->ec.safe_level = sec->safe_level;
628  th->ec.raised_flag = sec->raised_flag;
629  th->ec.tag = sec->tag;
630  th->ec.protect_tag = sec->protect_tag;
631  th->ec.root_lep = sec->root_lep;
632  th->ec.root_svar = sec->root_svar;
633  th->ec.ensure_list = sec->ensure_list;
634  th->ec.errinfo = sec->errinfo;
635  th->ec.trace_arg = sec->trace_arg;
636 
637  VM_ASSERT(th->ec.vm_stack != NULL);
638  }
639  else {
640  /* fiber */
641  fiber_restore_thread(th, (rb_fiber_t*)cont);
642  }
643 }
644 
645 #if FIBER_USE_NATIVE
646 #ifdef _WIN32
/* (Windows native fibers) Record the base of the current fiber's machine
 * stack: take the address of a local, round it to the containing page via
 * RB_PAGE_MASK, and when the stack grows upward offset by one page so the
 * recorded start stays inside the stack region. */
static void
fiber_set_stack_location(void)
{
    rb_thread_t *th = GET_THREAD();
    VALUE *ptr;

    SET_MACHINE_STACK_END(&ptr);
    th->ec.machine.stack_start = (void*)(((VALUE)ptr & RB_PAGE_MASK) + STACK_UPPER((void *)&ptr, 0, RB_PAGE_SIZE));
}
656 
/* Entry point handed to CreateFiberEx(): record where this fiber's machine
 * stack starts, then run the fiber body. Never returns normally. */
static VOID CALLBACK
fiber_entry(void *arg)
{
    fiber_set_stack_location();
    rb_fiber_start();
}
663 #else /* _WIN32 */
664 
665 /*
666  * FreeBSD require a first (i.e. addr) argument of mmap(2) is not NULL
667  * if MAP_STACK is passed.
668  * http://www.FreeBSD.org/cgi/query-pr.cgi?pr=158755
669  */
670 #if defined(MAP_STACK) && !defined(__FreeBSD__) && !defined(__FreeBSD_kernel__)
671 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON | MAP_STACK)
672 #else
673 #define FIBER_STACK_FLAGS (MAP_PRIVATE | MAP_ANON)
674 #endif
675 
676 static char*
677 fiber_machine_stack_alloc(size_t size)
678 {
679  char *ptr;
680 
681  if (machine_stack_cache_index > 0) {
682  if (machine_stack_cache[machine_stack_cache_index - 1].size == (size / sizeof(VALUE))) {
683  ptr = machine_stack_cache[machine_stack_cache_index - 1].ptr;
684  machine_stack_cache_index--;
685  machine_stack_cache[machine_stack_cache_index].ptr = NULL;
686  machine_stack_cache[machine_stack_cache_index].size = 0;
687  }
688  else{
689  /* TODO handle multiple machine stack size */
690  rb_bug("machine_stack_cache size is not canonicalized");
691  }
692  }
693  else {
694  void *page;
696 
697  errno = 0;
698  ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, FIBER_STACK_FLAGS, -1, 0);
699  if (ptr == MAP_FAILED) {
700  rb_raise(rb_eFiberError, "can't alloc machine stack to fiber: %s", strerror(errno));
701  }
702 
703  /* guard page setup */
704  page = ptr + STACK_DIR_UPPER(size - RB_PAGE_SIZE, 0);
705  if (mprotect(page, RB_PAGE_SIZE, PROT_NONE) < 0) {
706  rb_raise(rb_eFiberError, "mprotect failed");
707  }
708  }
709 
710  return ptr;
711 }
712 #endif
713 
714 static void
715 fiber_initialize_machine_stack_context(rb_fiber_t *fib, size_t size)
716 {
717  rb_execution_context_t *sec = &fib->cont.saved_ec;
718 
719 #ifdef _WIN32
720 # if defined(_MSC_VER) && _MSC_VER <= 1200
721 # define CreateFiberEx(cs, stacksize, flags, entry, param) \
722  CreateFiber((stacksize), (entry), (param))
723 # endif
724  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
725  if (!fib->fib_handle) {
726  /* try to release unnecessary fibers & retry to create */
727  rb_gc();
728  fib->fib_handle = CreateFiberEx(size - 1, size, 0, fiber_entry, NULL);
729  if (!fib->fib_handle) {
730  rb_raise(rb_eFiberError, "can't create fiber");
731  }
732  }
733  sec->machine.stack_maxsize = size;
734 #else /* not WIN32 */
735  ucontext_t *context = &fib->context;
736  char *ptr;
738 
739  getcontext(context);
740  ptr = fiber_machine_stack_alloc(size);
741  context->uc_link = NULL;
742  context->uc_stack.ss_sp = ptr;
743  context->uc_stack.ss_size = size;
744  fib->ss_sp = ptr;
745  fib->ss_size = size;
746  makecontext(context, rb_fiber_start, 0);
747  sec->machine.stack_start = (VALUE*)(ptr + STACK_DIR_UPPER(0, size));
748  sec->machine.stack_maxsize = size - RB_PAGE_SIZE;
749 #endif
750 #ifdef __ia64
751  sth->machine.register_stack_maxsize = sth->machine.stack_maxsize;
752 #endif
753 }
754 
755 NOINLINE(static void fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib));
756 
757 static void
758 fiber_setcontext(rb_fiber_t *newfib, rb_fiber_t *oldfib)
759 {
760  rb_thread_t *th = GET_THREAD();
761 
762  /* save oldfib's machine stack / TODO: is it needed? */
763  if (!FIBER_TERMINATED_P(oldfib)) {
766  if (STACK_DIR_UPPER(0, 1)) {
768  oldfib->cont.machine.stack = th->ec.machine.stack_end;
769  }
770  else {
772  oldfib->cont.machine.stack = th->ec.machine.stack_start;
773  }
774  }
775 
776  /* exchange machine_stack_start between oldfib and newfib */
778 
779  /* oldfib->machine.stack_end should be NULL */
780  oldfib->cont.saved_ec.machine.stack_end = NULL;
781 
782  /* restore thread context */
783  fiber_restore_thread(th, newfib);
784 
785 #ifndef _WIN32
786  if (!newfib->context.uc_stack.ss_sp && th->root_fiber != newfib) {
787  rb_bug("non_root_fiber->context.uc_stac.ss_sp should not be NULL");
788  }
789 #endif
790  /* swap machine context */
791 #ifdef _WIN32
792  SwitchToFiber(newfib->fib_handle);
793 #else
794  swapcontext(&oldfib->context, &newfib->context);
795 #endif
796 }
797 #endif
798 
799 NOINLINE(NORETURN(static void cont_restore_1(rb_context_t *)));
800 
801 static void
802 cont_restore_1(rb_context_t *cont)
803 {
804  cont_restore_thread(cont);
805 
806  /* restore machine stack */
807 #ifdef _M_AMD64
808  {
809  /* workaround for x64 SEH */
810  jmp_buf buf;
811  setjmp(buf);
812  ((_JUMP_BUFFER*)(&cont->jmpbuf))->Frame =
813  ((_JUMP_BUFFER*)(&buf))->Frame;
814  }
815 #endif
816  if (cont->machine.stack_src) {
818  MEMCPY(cont->machine.stack_src, cont->machine.stack,
819  VALUE, cont->machine.stack_size);
820  }
821 
822 #ifdef __ia64
823  if (cont->machine.register_stack_src) {
824  MEMCPY(cont->machine.register_stack_src, cont->machine.register_stack,
825  VALUE, cont->machine.register_stack_size);
826  }
827 #endif
828 
829  ruby_longjmp(cont->jmpbuf, 1);
830 }
831 
832 NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)));
833 
#ifdef __ia64
/* ia64 keeps a second, register backing-store stack (RSE). Before restoring
 * a continuation we must grow the current backing store past the region
 * that will be overwritten, which is done by recursing until the current
 * bsp is beyond the saved register stack. The volatile dummies below force
 * the compiler to occupy many registers so each recursion actually consumes
 * backing-store space. */
#define C(a) rse_##a##0, rse_##a##1, rse_##a##2, rse_##a##3, rse_##a##4
#define E(a) rse_##a##0= rse_##a##1= rse_##a##2= rse_##a##3= rse_##a##4
static volatile int C(a), C(b), C(c), C(d), C(e);
static volatile int C(f), C(g), C(h), C(i), C(j);
static volatile int C(k), C(l), C(m), C(n), C(o);
static volatile int C(p), C(q), C(r), C(s), C(t);
#if 0
{/* the above lines make cc-mode.el confused so much */}
#endif
/* always 0 at runtime, but the compiler cannot prove it, so the register-
 * burning block below is kept alive */
int rb_dummy_false = 0;
NORETURN(NOINLINE(static void register_stack_extend(rb_context_t *, VALUE *, VALUE *)));
static void
register_stack_extend(rb_context_t *cont, VALUE *vp, VALUE *curr_bsp)
{
    if (rb_dummy_false) {
        /* use registers as much as possible */
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
        E(a) = E(b) = E(c) = E(d) = E(e) =
        E(f) = E(g) = E(h) = E(i) = E(j) =
        E(k) = E(l) = E(m) = E(n) = E(o) =
        E(p) = E(q) = E(r) = E(s) = E(t) = 0;
    }
    /* recurse until the backing store has grown past the saved region */
    if (curr_bsp < cont->machine.register_stack_src+cont->machine.register_stack_size) {
        register_stack_extend(cont, vp, (VALUE*)rb_ia64_bsp());
    }
    cont_restore_0(cont, vp);
}
#undef C
#undef E
#endif
868 
/* First half of continuation restore.
 *
 * cont_restore_1 overwrites the machine stack with the saved copy, so the
 * frame that calls it must sit OUTSIDE the region being overwritten. This
 * function grows the current stack (via alloca, or by recursing with a
 * STACK_PAD_SIZE-sized local when alloca is unavailable) until &space is
 * past cont->machine.stack_src in the growth direction, then tail-calls
 * cont_restore_1. The `space[0] = *sp;` stores keep the alloca from being
 * optimized away. When STACK_GROW_DIRECTION is 0 (unknown at compile
 * time), both directions are compiled in and chosen at runtime by
 * comparing addr_in_prev_frame with &space. */
static void
cont_restore_0(rb_context_t *cont, VALUE *addr_in_prev_frame)
{
    if (cont->machine.stack_src) {
#ifdef HAVE_ALLOCA
#define STACK_PAD_SIZE 1
#else
#define STACK_PAD_SIZE 1024
#endif
        VALUE space[STACK_PAD_SIZE];

#if !STACK_GROW_DIRECTION
        if (addr_in_prev_frame > &space[0]) {
            /* Stack grows downward */
#endif
#if STACK_GROW_DIRECTION <= 0
            volatile VALUE *const end = cont->machine.stack_src;
            if (&space[0] > end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, &space[0] - end);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[0]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
        else {
            /* Stack grows upward */
#endif
#if STACK_GROW_DIRECTION >= 0
            volatile VALUE *const end = cont->machine.stack_src + cont->machine.stack_size;
            if (&space[STACK_PAD_SIZE] < end) {
# ifdef HAVE_ALLOCA
                volatile VALUE *sp = ALLOCA_N(VALUE, end - &space[STACK_PAD_SIZE]);
                space[0] = *sp;
# else
                cont_restore_0(cont, &space[STACK_PAD_SIZE-1]);
# endif
            }
#endif
#if !STACK_GROW_DIRECTION
        }
#endif
    }
    cont_restore_1(cont);
}
/* On ia64, every restore must also extend the register backing store, so
 * all callers are redirected through register_stack_extend. */
#ifdef __ia64
#define cont_restore_0(cont, vp) register_stack_extend((cont), (vp), (VALUE*)rb_ia64_bsp())
#endif
920 
921 /*
922  * Document-class: Continuation
923  *
924  * Continuation objects are generated by Kernel#callcc,
925  * after having +require+d <i>continuation</i>. They hold
926  * a return address and execution context, allowing a nonlocal return
927  * to the end of the <code>callcc</code> block from anywhere within a
928  * program. Continuations are somewhat analogous to a structured
929  * version of C's <code>setjmp/longjmp</code> (although they contain
930  * more state, so you might consider them closer to threads).
931  *
932  * For instance:
933  *
934  * require "continuation"
935  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
936  * callcc{|cc| $cc = cc}
937  * puts(message = arr.shift)
938  * $cc.call unless message =~ /Max/
939  *
940  * <em>produces:</em>
941  *
942  * Freddie
943  * Herbie
944  * Ron
945  * Max
946  *
947  * Also you can call callcc in other methods:
948  *
949  * require "continuation"
950  *
951  * def g
952  * arr = [ "Freddie", "Herbie", "Ron", "Max", "Ringo" ]
953  * cc = callcc { |cc| cc }
954  * puts arr.shift
955  * return cc, arr.size
956  * end
957  *
958  * def f
959  * c, size = g
960  * c.call(c) if size > 1
961  * end
962  *
963  * f
964  *
965  * This (somewhat contrived) example allows the inner loop to abandon
966  * processing early:
967  *
968  * require "continuation"
969  * callcc {|cont|
970  * for i in 0..4
971  * print "\n#{i}: "
972  * for j in i*5...(i+1)*5
973  * cont.call() if j == 17
974  * printf "%3d", j
975  * end
976  * end
977  * }
978  * puts
979  *
980  * <em>produces:</em>
981  *
982  * 0: 0 1 2 3 4
983  * 1: 5 6 7 8 9
984  * 2: 10 11 12 13 14
985  * 3: 15 16
986  */
987 
988 /*
989  * call-seq:
990  * callcc {|cont| block } -> obj
991  *
992  * Generates a Continuation object, which it passes to
993  * the associated block. You need to <code>require
994  * 'continuation'</code> before using this method. Performing a
995  * <em>cont</em><code>.call</code> will cause the #callcc
996  * to return (as will falling through the end of the block). The
997  * value returned by the #callcc is the value of the
998  * block, or the value passed to <em>cont</em><code>.call</code>. See
999  * class Continuation for more details. Also see
1000  * Kernel#throw for an alternative mechanism for
1001  * unwinding a call stack.
1002  */
1003 
1004 static VALUE
1005 rb_callcc(VALUE self)
1006 {
1007  volatile int called;
1008  volatile VALUE val = cont_capture(&called);
1009 
1010  if (called) {
1011  return val;
1012  }
1013  else {
1014  return rb_yield(val);
1015  }
1016 }
1017 
1018 static VALUE
1019 make_passing_arg(int argc, const VALUE *argv)
1020 {
1021  switch (argc) {
1022  case 0:
1023  return Qnil;
1024  case 1:
1025  return argv[0];
1026  default:
1027  return rb_ary_new4(argc, argv);
1028  }
1029 }
1030 
1031 /* CAUTION!! : Currently, error in rollback_func is not supported */
1032 /* same as rb_protect if set rollback_func to NULL */
1033 void
1035 {
1036  st_table **table_p = &GET_VM()->ensure_rollback_table;
1037  if (UNLIKELY(*table_p == NULL)) {
1038  *table_p = st_init_numtable();
1039  }
1040  st_insert(*table_p, (st_data_t)ensure_func, (st_data_t)rollback_func);
1041 }
1042 
1043 static inline VALUE
1044 lookup_rollback_func(VALUE (*ensure_func)(ANYARGS))
1045 {
1046  st_table *table = GET_VM()->ensure_rollback_table;
1047  st_data_t val;
1048  if (table && st_lookup(table, (st_data_t)ensure_func, &val))
1049  return (VALUE) val;
1050  return Qundef;
1051 }
1052 
1053 
/* Reconcile the ensure stacks when a continuation jumps between contexts.
 *
 * `current` is the live ensure list; `target` is the array snapshot taken
 * by cont_capture (terminated by a zero marker). The common suffix of both
 * stacks (`base_point` entries) is left alone; ensure handlers above it in
 * `current` are executed (we are leaving those scopes), then rollback
 * functions for the entries above it in `target` are re-run (we are
 * re-entering those scopes).
 *
 * NOTE(review): the guard `if (!lookup_rollback_func(...))` tests plain C
 * falsiness; lookup returns Qundef (non-zero) for unregistered funcs, so
 * the raise fires only for funcs registered with a NULL rollback. Confirm
 * that unregistered ensure handlers are intentionally allowed through. */
static inline void
rollback_ensure_stack(VALUE self,rb_ensure_list_t *current,rb_ensure_entry_t *target)
{
    rb_ensure_list_t *p;
    rb_ensure_entry_t *entry;
    size_t i;
    size_t cur_size;
    size_t target_size;
    size_t base_point;
    VALUE (*func)(ANYARGS);

    /* measure both stacks */
    cur_size = 0;
    for (p=current; p; p=p->next)
        cur_size++;
    target_size = 0;
    for (entry=target; entry->marker; entry++)
        target_size++;

    /* search common stack point */
    p = current;
    base_point = cur_size;
    while (base_point) {
        if (target_size >= base_point &&
            p->entry.marker == target[target_size - base_point].marker)
            break;
        base_point --;
        p = p->next;
    }

    /* rollback function check */
    for (i=0; i < target_size - base_point; i++) {
        if (!lookup_rollback_func(target[i].e_proc)) {
            rb_raise(rb_eRuntimeError, "continuation called from out of critical rb_ensure scope");
        }
    }
    /* pop ensure stack */
    while (cur_size > base_point) {
        /* escape from ensure block */
        (*current->entry.e_proc)(current->entry.data2);
        current = current->next;
        cur_size--;
    }
    /* push ensure stack */
    /* note: relies on `i` == target_size - base_point from the loop above */
    while (i--) {
        func = (VALUE (*)(ANYARGS)) lookup_rollback_func(target[i].e_proc);
        if ((VALUE)func != Qundef) {
            (*func)(target[i].data2);
        }
    }
}
1104 
1105 /*
1106  * call-seq:
1107  * cont.call(args, ...)
1108  * cont[args, ...]
1109  *
1110  * Invokes the continuation. The program continues from the end of the
1111  * <code>callcc</code> block. If no arguments are given, the original
1112  * <code>callcc</code> returns <code>nil</code>. If one argument is
1113  * given, <code>callcc</code> returns it. Otherwise, an array
1114  * containing <i>args</i> is returned.
1115  *
1116  * callcc {|cont| cont.call } #=> nil
1117  * callcc {|cont| cont.call 1 } #=> 1
1118  * callcc {|cont| cont.call 1, 2, 3 } #=> [1, 2, 3]
1119  */
1120 
/* Continuation#call / Continuation#[] implementation.
 *
 * Validates that the jump is legal (same thread, no intervening
 * stack-rewinding barrier, same fiber), reconciles ensure handlers, stores
 * the passed arguments, and jumps; the check order determines which error
 * is reported first, so it must not be rearranged. Never returns. */
static VALUE
rb_cont_call(int argc, VALUE *argv, VALUE contval)
{
    rb_context_t *cont;
    rb_thread_t *th = GET_THREAD();
    GetContPtr(contval, cont);

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eRuntimeError, "continuation called across threads");
    }
    if (cont->saved_ec.protect_tag != th->ec.protect_tag) {
        rb_raise(rb_eRuntimeError, "continuation called across stack rewinding barrier");
    }
    if (cont->saved_ec.fiber) {
        if (th->ec.fiber != cont->saved_ec.fiber) {
            rb_raise(rb_eRuntimeError, "continuation called across fiber");
        }
    }
    rollback_ensure_stack(contval, th->ec.ensure_list, cont->ensure_array);

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);

    cont_restore_0(cont, &contval);
    return Qnil; /* unreachable */
}
1147 
1148 /*********/
1149 /* fiber */
1150 /*********/
1151 
1152 /*
1153  * Document-class: Fiber
1154  *
1155  * Fibers are primitives for implementing light weight cooperative
1156  * concurrency in Ruby. Basically they are a means of creating code blocks
1157  * that can be paused and resumed, much like threads. The main difference
1158  * is that they are never preempted and that the scheduling must be done by
1159  * the programmer and not the VM.
1160  *
1161  * As opposed to other stackless light weight concurrency models, each fiber
1162  * comes with a stack. This enables the fiber to be paused from deeply
1163  * nested function calls within the fiber block. See the ruby(1)
1164  * manpage to configure the size of the fiber stack(s).
1165  *
1166  * When a fiber is created it will not run automatically. Rather it must
1167  * be explicitly asked to run using the <code>Fiber#resume</code> method.
1168  * The code running inside the fiber can give up control by calling
1169  * <code>Fiber.yield</code> in which case it yields control back to caller
1170  * (the caller of the <code>Fiber#resume</code>).
1171  *
1172  * Upon yielding or termination the Fiber returns the value of the last
1173  * executed expression
1174  *
1175  * For instance:
1176  *
1177  * fiber = Fiber.new do
1178  * Fiber.yield 1
1179  * 2
1180  * end
1181  *
1182  * puts fiber.resume
1183  * puts fiber.resume
1184  * puts fiber.resume
1185  *
1186  * <em>produces</em>
1187  *
1188  * 1
1189  * 2
1190  * FiberError: dead fiber called
1191  *
1192  * The <code>Fiber#resume</code> method accepts an arbitrary number of
1193  * parameters, if it is the first call to <code>resume</code> then they
1194  * will be passed as block arguments. Otherwise they will be the return
1195  * value of the call to <code>Fiber.yield</code>
1196  *
1197  * Example:
1198  *
1199  * fiber = Fiber.new do |first|
1200  * second = Fiber.yield first + 2
1201  * end
1202  *
1203  * puts fiber.resume 10
1204  * puts fiber.resume 14
1205  * puts fiber.resume 18
1206  *
1207  * <em>produces</em>
1208  *
1209  * 12
1210  * 14
1211  * FiberError: dead fiber called
1212  *
1213  */
1214 
/* TypedData type descriptor for Fiber objects: GC mark/free/memsize
 * callbacks, with immediate (non-deferred) freeing. */
static const rb_data_type_t fiber_data_type = {
    "fiber",
    {fiber_mark, fiber_free, fiber_memsize,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
1220 
/* Allocator for the Fiber class: wraps a NULL rb_fiber_t pointer; the
 * real rb_fiber_t is attached later by fiber_t_alloc() on #initialize. */
static VALUE
fiber_alloc(VALUE klass)
{
    return TypedData_Wrap_Struct(klass, &fiber_data_type, 0);
}
1226 
/* Allocate and attach the rb_fiber_t for a freshly allocated Fiber
 * object. Initializes the saved context against the current thread and
 * leaves the fiber in the CREATED state. */
static rb_fiber_t*
fiber_t_alloc(VALUE fibval)
{
    rb_fiber_t *fib;
    rb_thread_t *th = GET_THREAD();

    /* Fiber#initialize called twice on the same object. */
    if (DATA_PTR(fibval) != 0) {
        rb_raise(rb_eRuntimeError, "cannot initialize twice");
    }

    /* NOTE(review): one source line is elided here in this extract
     * (Doxygen link stripping) -- confirm against upstream cont.c. */
    fib = ZALLOC(rb_fiber_t);
    fib->cont.self = fibval;
    fib->cont.type = FIBER_CONTEXT;
    cont_init(&fib->cont, th);
    fib->cont.saved_ec.fiber = fib;
    fib->prev = NULL;

    /* fib->status == 0 == CREATED
     * So that we don't need to set status: fiber_status_set(fib, FIBER_CREATED); */
    VM_ASSERT(FIBER_CREATED_P(fib));

    DATA_PTR(fibval) = fib;

    return fib;
}
1253 
1256  const rb_iseq_t *iseq,
1257  VALUE type,
1258  VALUE self,
1259  VALUE specval,
1260  VALUE cref_or_me,
1261  const VALUE *pc,
1262  VALUE *sp,
1263  int local_size,
1264  int stack_max);
1265 
/* Set up a new fiber: allocate its dedicated VM stack, push the initial
 * dummy control frame into the saved execution context, and record the
 * proc to be invoked on the first resume. Returns fibval. */
static VALUE
fiber_init(VALUE fibval, VALUE proc)
{
    rb_fiber_t *fib = fiber_t_alloc(fibval);
    rb_context_t *cont = &fib->cont;
    rb_execution_context_t *sec = &cont->saved_ec;
    rb_thread_t *cth = GET_THREAD();

    /* initialize cont */
    cont->saved_vm_stack.ptr = NULL;

    sec->vm_stack = NULL;
    sec->vm_stack_size = 0;

    /* Stack size comes from the per-VM default parameters (in VALUEs). */
    sec->vm_stack_size = cth->vm->default_params.fiber_vm_stack_size / sizeof(VALUE);
    sec->vm_stack = ALLOC_N(VALUE, sec->vm_stack_size);
    sec->cfp = (void *)(sec->vm_stack + sec->vm_stack_size);

    rb_vm_push_frame(sec,
                     NULL,
                     /* NOTE(review): the frame-type and cref_or_me
                      * arguments are elided in this extract -- confirm
                      * against upstream cont.c. */
                     Qnil, /* self */
                     0, /* specval */
                     NULL, /* pc */
                     sec->vm_stack, /* sp */
                     0, /* local_size */
                     0);

    sec->tag = NULL;
    sec->local_storage = NULL;
    /* NOTE(review): one or two initializer lines appear elided here in
     * this extract -- confirm against upstream cont.c. */

    fib->first_proc = proc;

#if !FIBER_USE_NATIVE
    /* Non-native fibers longjmp through the thread's root jmpbuf. */
    MEMCPY(&cont->jmpbuf, &cth->root_jmpbuf, rb_jmpbuf_t, 1);
#endif

    return fibval;
}
1308 
1309 /* :nodoc: */
/* :nodoc: */
/* Fiber#initialize: capture the given block as a proc and set up the fiber. */
static VALUE
rb_fiber_init(VALUE fibval)
{
    return fiber_init(fibval, rb_block_proc());
}
1315 
1316 VALUE
1318 {
1319  return fiber_init(fiber_alloc(rb_cFiber), rb_proc_new(func, obj));
1320 }
1321 
1322 static void rb_fiber_terminate(rb_fiber_t *fib);
1323 
1324 void
1326 {
1327  rb_thread_t *th = GET_THREAD();
1328  rb_fiber_t *fib = th->ec.fiber;
1329  rb_proc_t *proc;
1330  enum ruby_tag_type state;
1331 
1332  VM_ASSERT(FIBER_RESUMED_P(fib));
1333 
1334  TH_PUSH_TAG(th);
1335  if ((state = EXEC_TAG()) == TAG_NONE) {
1336  rb_context_t *cont = &VAR_FROM_MEMORY(fib)->cont;
1337  int argc;
1338  const VALUE *argv, args = cont->value;
1339  GetProcPtr(fib->first_proc, proc);
1340  argv = (argc = cont->argc) > 1 ? RARRAY_CONST_PTR(args) : &args;
1341  cont->value = Qnil;
1342  th->ec.errinfo = Qnil;
1344  th->ec.root_svar = Qfalse;
1345 
1346  EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);
1347  cont->value = rb_vm_invoke_proc(th, proc, argc, argv, VM_BLOCK_HANDLER_NONE);
1348  }
1349  TH_POP_TAG();
1350 
1351  if (state) {
1352  VM_ASSERT(FIBER_RESUMED_P(fib));
1353 
1354  if (state == TAG_RAISE || state == TAG_FATAL) {
1356  }
1357  else {
1359  if (!NIL_P(err))
1361  }
1363  }
1364 
1365  rb_fiber_terminate(fib);
1367 }
1368 
/* Lazily create the root fiber for a thread that is using the Fiber API
 * for the first time. The root fiber reuses the thread's own stack, so
 * no VM stack is allocated, and it starts directly in the RESUMED state. */
static rb_fiber_t *
root_fiber_alloc(rb_thread_t *th)
{
    rb_fiber_t *fib;
    /* no need to allocate vm stack */
    fib = fiber_t_alloc(fiber_alloc(rb_cFiber));
    fib->cont.type = ROOT_FIBER_CONTEXT;
#if FIBER_USE_NATIVE
#ifdef _WIN32
    /* On Windows the running thread must be converted before fibers can run. */
    fib->fib_handle = ConvertThreadToFiber(0);
#endif
#endif
    fiber_status_set(fib, FIBER_RESUMED); /* skip CREATED */
    th->root_fiber = th->ec.fiber = fib;
    return fib;
}
1385 
1386 static inline rb_fiber_t*
1387 fiber_current(void)
1388 {
1389  rb_thread_t *th = GET_THREAD();
1390  if (th->ec.fiber == NULL) {
1391  rb_fiber_t *fib = root_fiber_alloc(th);
1392  /* Running thread object has stack management responsibility */
1393  fib->cont.saved_ec.vm_stack = NULL;
1394  }
1395  return th->ec.fiber;
1396 }
1397 
1398 static inline rb_fiber_t*
1399 return_fiber(void)
1400 {
1401  rb_fiber_t *fib = fiber_current();
1402  rb_fiber_t *prev = fib->prev;
1403 
1404  if (!prev) {
1405  rb_fiber_t *root_fiber = GET_THREAD()->root_fiber;
1406 
1407  if (root_fiber == fib) {
1408  rb_raise(rb_eFiberError, "can't yield from root fiber");
1409  }
1410  return root_fiber;
1411  }
1412  else {
1413  fib->prev = NULL;
1414  return prev;
1415  }
1416 }
1417 
1418 VALUE
1420 {
1421  return fiber_current()->cont.self;
1422 }
1423 
/* Save the current fiber's context and transfer control to next_fib.
 * Returns (in the context of whichever fiber later switches back here)
 * the value passed to that later switch; re-raises if the switch
 * delivered an exception (cont.argc == -1 convention). */
static inline VALUE
fiber_store(rb_fiber_t *next_fib, rb_thread_t *th)
{
    rb_fiber_t *fib;

    if (th->ec.fiber != NULL) {
        fib = th->ec.fiber;
        cont_save_thread(&fib->cont, th);
    }
    else {
        /* create root fiber */
        fib = root_fiber_alloc(th);
    }

    /* NOTE(review): one line (apparently an assertion) is elided here in
     * this extract -- confirm against upstream cont.c. */
    VM_ASSERT(FIBER_RUNNABLE_P(next_fib));

#if FIBER_USE_NATIVE
    /* First activation of a native fiber: give it a machine stack. */
    if (FIBER_CREATED_P(next_fib)) {
        fiber_initialize_machine_stack_context(next_fib, th->vm->default_params.fiber_machine_stack_size);
    }
#endif

    if (FIBER_RESUMED_P(fib)) fiber_status_set(fib, FIBER_SUSPENDED);

#if FIBER_USE_NATIVE == 0
    /* the stack must be (re-)allocated before fib->status changes, to pass fiber_verify() */
    cont_save_machine_stack(th, &fib->cont);
#endif

    fiber_status_set(next_fib, FIBER_RESUMED);

#if FIBER_USE_NATIVE
    fiber_setcontext(next_fib, fib);
    /* restored */
#ifndef _WIN32
    /* Reclaim the machine stack of a fiber that terminated while we were
     * away: cache it for reuse, or unmap when the cache is full. */
    if (terminated_machine_stack.ptr) {
        if (machine_stack_cache_index < MAX_MACHINE_STACK_CACHE) {
            machine_stack_cache[machine_stack_cache_index].ptr = terminated_machine_stack.ptr;
            machine_stack_cache[machine_stack_cache_index].size = terminated_machine_stack.size;
            machine_stack_cache_index++;
        }
        else {
            if (terminated_machine_stack.ptr != fib->cont.machine.stack) {
                munmap((void*)terminated_machine_stack.ptr, terminated_machine_stack.size * sizeof(VALUE));
            }
            else {
                rb_bug("terminated fiber resumed");
            }
        }
        terminated_machine_stack.ptr = NULL;
        terminated_machine_stack.size = 0;
    }
#endif /* not _WIN32 */
    fib = th->ec.fiber;
    /* argc == -1 marks an exception being delivered through the switch. */
    if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
    return fib->cont.value;

#else /* FIBER_USE_NATIVE */
    if (ruby_setjmp(fib->cont.jmpbuf)) {
        /* restored */
        fib = th->ec.fiber;
        if (fib->cont.argc == -1) rb_exc_raise(fib->cont.value);
        if (next_fib->cont.value == Qundef) {
            cont_restore_0(&next_fib->cont, &next_fib->cont.value);
            VM_UNREACHABLE(fiber_store);
        }
        return fib->cont.value;
    }
    else {
        VALUE undef = Qundef;
        cont_restore_0(&next_fib->cont, &undef);
        VM_UNREACHABLE(fiber_store);
    }
#endif /* FIBER_USE_NATIVE */
}
1500 
/* Core fiber context switch: transfer control (and argc/argv) to fib.
 * is_resume records the current fiber as fib's resumer so control can
 * return to it on yield/termination. Returns the value passed back when
 * this fiber is eventually switched to again. */
static inline VALUE
fiber_switch(rb_fiber_t *fib, int argc, const VALUE *argv, int is_resume)
{
    VALUE value;
    rb_context_t *cont = &fib->cont;
    rb_thread_t *th = GET_THREAD();

    if (th->ec.fiber == fib) {
        /* ignore fiber context switch
         * because destination fiber is same as current fiber
         */
        return make_passing_arg(argc, argv);
    }

    if (cont_thread_value(cont) != th->self) {
        rb_raise(rb_eFiberError, "fiber called across threads");
    }
    else if (cont->saved_ec.protect_tag != th->ec.protect_tag) {
        rb_raise(rb_eFiberError, "fiber called across stack rewinding barrier");
    }
    else if (FIBER_TERMINATED_P(fib)) {
        value = rb_exc_new2(rb_eFiberError, "dead fiber called");

        if (!FIBER_TERMINATED_P(th->ec.fiber)) {
            rb_exc_raise(value);
            VM_UNREACHABLE(fiber_switch);
        }
        else {
            /* th->ec.fiber is also dead => switch to root fiber */
            /* (this means we're being called from rb_fiber_terminate, */
            /* and the terminated fiber's return_fiber() is already dead) */
            /* NOTE(review): one line (apparently an assertion) is elided
             * here in this extract -- confirm against upstream cont.c. */

            /* Deliver the FiberError to the root fiber as an exception
             * (argc == -1 convention). */
            cont = &th->root_fiber->cont;
            cont->argc = -1;
            cont->value = value;
#if FIBER_USE_NATIVE
            fiber_setcontext(th->root_fiber, th->ec.fiber);
#else
            cont_restore_0(cont, &value);
#endif
            VM_UNREACHABLE(fiber_switch);
        }
    }

    if (is_resume) {
        fib->prev = fiber_current();
    }

    /* NOTE(review): one line (apparently an assertion) is elided here in
     * this extract -- confirm against upstream cont.c. */

    cont->argc = argc;
    cont->value = make_passing_arg(argc, argv);
    value = fiber_store(fib, th);
    RUBY_VM_CHECK_INTS(th);

    EXEC_EVENT_HOOK(th, RUBY_EVENT_FIBER_SWITCH, th->self, 0, 0, 0, Qnil);

    return value;
}
1561 
/* C API backing Fiber#transfer: switch to fibval without recording a
 * resumer link (is_resume == 0). */
VALUE
rb_fiber_transfer(VALUE fibval, int argc, const VALUE *argv)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    return fiber_switch(fib, argc, argv, 0);
}
1569 
/* Mark fib as terminated and hand control, together with the fiber
 * block's final value, to its return fiber. Never returns to fib. */
static void
rb_fiber_terminate(rb_fiber_t *fib)
{
    VALUE value = fib->cont.value;
    VM_ASSERT(FIBER_RESUMED_P(fib));

    fiber_status_set(fib, FIBER_TERMINATED);
#if FIBER_USE_NATIVE && !defined(_WIN32)
    /* Ruby must not switch to other thread until storing terminated_machine_stack */
    terminated_machine_stack.ptr = fib->ss_sp;
    terminated_machine_stack.size = fib->ss_size / sizeof(VALUE);
    fib->ss_sp = NULL;
    fib->context.uc_stack.ss_sp = NULL;
    fib->cont.machine.stack = NULL;
    fib->cont.machine.stack_size = 0;
#endif
    fiber_switch(return_fiber(), 1, &value, 0);
}
1588 
1589 VALUE
1590 rb_fiber_resume(VALUE fibval, int argc, const VALUE *argv)
1591 {
1592  rb_fiber_t *fib;
1593  GetFiberPtr(fibval, fib);
1594 
1595  if (fib->prev != 0 || fib->cont.type == ROOT_FIBER_CONTEXT) {
1596  rb_raise(rb_eFiberError, "double resume");
1597  }
1598  if (fib->transferred != 0) {
1599  rb_raise(rb_eFiberError, "cannot resume transferred Fiber");
1600  }
1601 
1602  return fiber_switch(fib, argc, argv, 1);
1603 }
1604 
/* C API backing Fiber.yield: give control back to the return fiber,
 * passing argc/argv as the value of the pending resume call. */
VALUE
rb_fiber_yield(int argc, const VALUE *argv)
{
    return fiber_switch(return_fiber(), argc, argv, 0);
}
1610 
1611 void
1613 {
1614  rb_thread_t *th = rb_thread_ptr(thval);
1615 
1616  if (th->root_fiber && th->root_fiber != th->ec.fiber) {
1618  }
1619 }
1620 
1621 /*
1622  * call-seq:
1623  * fiber.alive? -> true or false
1624  *
1625  * Returns true if the fiber can still be resumed (or transferred
1626  * to). After finishing execution of the fiber block this method will
1627  * always return false. You need to <code>require 'fiber'</code>
1628  * before using this method.
1629  */
1630 VALUE
1632 {
1633  const rb_fiber_t *fib;
1634  GetFiberPtr(fibval, fib);
1635  return FIBER_TERMINATED_P(fib) ? Qfalse : Qtrue;
1636 }
1637 
1638 /*
1639  * call-seq:
1640  * fiber.resume(args, ...) -> obj
1641  *
1642  * Resumes the fiber from the point at which the last <code>Fiber.yield</code>
1643  * was called, or starts running it if it is the first call to
1644  * <code>resume</code>. Arguments passed to resume will be the value of
1645  * the <code>Fiber.yield</code> expression or will be passed as block
1646  * parameters to the fiber's block if this is the first <code>resume</code>.
1647  *
1648  * Alternatively, when resume is called it evaluates to the arguments passed
1649  * to the next <code>Fiber.yield</code> statement inside the fiber's block
1650  * or to the block value if it runs to completion without any
1651  * <code>Fiber.yield</code>
1652  */
/* Method implementation for Fiber#resume (argc/argv calling convention). */
static VALUE
rb_fiber_m_resume(int argc, VALUE *argv, VALUE fib)
{
    return rb_fiber_resume(fib, argc, argv);
}
1658 
1659 /*
1660  * call-seq:
1661  * fiber.transfer(args, ...) -> obj
1662  *
1663  * Transfer control to another fiber, resuming it from where it last
1664  * stopped or starting it if it was not resumed before. The calling
1665  * fiber will be suspended much like in a call to
1666  * <code>Fiber.yield</code>. You need to <code>require 'fiber'</code>
1667  * before using this method.
1668  *
 1669  * The fiber which receives the transfer call treats it much like
1670  * a resume call. Arguments passed to transfer are treated like those
1671  * passed to resume.
1672  *
1673  * You cannot resume a fiber that transferred control to another one.
1674  * This will cause a double resume error. You need to transfer control
1675  * back to this fiber before it can yield and resume.
1676  *
1677  * Example:
1678  *
1679  * fiber1 = Fiber.new do
1680  * puts "In Fiber 1"
1681  * Fiber.yield
1682  * end
1683  *
1684  * fiber2 = Fiber.new do
1685  * puts "In Fiber 2"
1686  * fiber1.transfer
1687  * puts "Never see this message"
1688  * end
1689  *
1690  * fiber3 = Fiber.new do
1691  * puts "In Fiber 3"
1692  * end
1693  *
1694  * fiber2.resume
1695  * fiber3.resume
1696  *
1697  * <em>produces</em>
1698  *
 1699  *     In Fiber 2
 1700  *     In Fiber 1
 1701  *     In Fiber 3
1702  *
1703  */
/* Method implementation for Fiber#transfer: marks the fiber as
 * transferred (so a later #resume raises) and switches without
 * recording a resumer link. */
static VALUE
rb_fiber_m_transfer(int argc, VALUE *argv, VALUE fibval)
{
    rb_fiber_t *fib;
    GetFiberPtr(fibval, fib);
    fib->transferred = 1;
    return fiber_switch(fib, argc, argv, 0);
}
1712 
1713 /*
1714  * call-seq:
1715  * Fiber.yield(args, ...) -> obj
1716  *
1717  * Yields control back to the context that resumed the fiber, passing
1718  * along any arguments that were passed to it. The fiber will resume
1719  * processing at this point when <code>resume</code> is called next.
1720  * Any arguments passed to the next <code>resume</code> will be the
1721  * value that this <code>Fiber.yield</code> expression evaluates to.
1722  */
/* Singleton-method implementation for Fiber.yield. */
static VALUE
rb_fiber_s_yield(int argc, VALUE *argv, VALUE klass)
{
    return rb_fiber_yield(argc, argv);
}
1728 
1729 /*
1730  * call-seq:
1731  * Fiber.current() -> fiber
1732  *
1733  * Returns the current fiber. You need to <code>require 'fiber'</code>
1734  * before using this method. If you are not running in the context of
1735  * a fiber this method will return the root fiber.
1736  */
/* Singleton-method implementation for Fiber.current. */
static VALUE
rb_fiber_s_current(VALUE klass)
{
    return rb_fiber_current();
}
1742 
1743 /*
1744  * call-seq:
1745  * fiber.to_s -> string
1746  *
1747  * Returns fiber information string.
1748  *
1749  */
1750 
/* Implementation of Fiber#to_s / #inspect: the usual object description
 * plus the fiber's status name, e.g. " (created)". */
static VALUE
fiber_to_s(VALUE fibval)
{
    const rb_fiber_t *fib;
    const rb_proc_t *proc;
    char status_info[0x10];

    GetFiberPtr(fibval, fib);
    snprintf(status_info, 0x10, " (%s)", fiber_status_name(fib->status));
    if (!rb_obj_is_proc(fib->first_proc)) {
        /* No proc to describe: take the generic "#<Fiber:0x...>" string,
         * drop its trailing '>' and append " (status)>" instead. */
        VALUE str = rb_any_to_s(fibval);
        strlcat(status_info, ">", sizeof(status_info));
        rb_str_set_len(str, RSTRING_LEN(str)-1);
        rb_str_cat_cstr(str, status_info);
        return str;
    }
    GetProcPtr(fib->first_proc, proc);
    return rb_block_to_s(fibval, &proc->block, status_info);
}
1770 
1771 /*
1772  * Document-class: FiberError
1773  *
1774  * Raised when an invalid operation is attempted on a Fiber, in
1775  * particular when attempting to call/resume a dead fiber,
1776  * attempting to yield from the root fiber, or calling a fiber across
1777  * threads.
1778  *
1779  * fiber = Fiber.new{}
1780  * fiber.resume #=> nil
1781  * fiber.resume #=> FiberError: dead fiber called
1782  */
1783 
1784 void
1786 {
1787 #if FIBER_USE_NATIVE
1788  rb_thread_t *th = GET_THREAD();
1789 
1790 #ifdef _WIN32
1791  SYSTEM_INFO info;
1792  GetSystemInfo(&info);
1793  pagesize = info.dwPageSize;
1794 #else /* not WIN32 */
1795  pagesize = sysconf(_SC_PAGESIZE);
1796 #endif
1798 #endif
1799 
1800  rb_cFiber = rb_define_class("Fiber", rb_cObject);
1801  rb_define_alloc_func(rb_cFiber, fiber_alloc);
1802  rb_eFiberError = rb_define_class("FiberError", rb_eStandardError);
1803  rb_define_singleton_method(rb_cFiber, "yield", rb_fiber_s_yield, -1);
1804  rb_define_method(rb_cFiber, "initialize", rb_fiber_init, 0);
1805  rb_define_method(rb_cFiber, "resume", rb_fiber_m_resume, -1);
1806  rb_define_method(rb_cFiber, "to_s", fiber_to_s, 0);
1807  rb_define_alias(rb_cFiber, "inspect", "to_s");
1808 }
1809 
1811 
1812 void
1814 {
1815  rb_cContinuation = rb_define_class("Continuation", rb_cObject);
1816  rb_undef_alloc_func(rb_cContinuation);
1817  rb_undef_method(CLASS_OF(rb_cContinuation), "new");
1818  rb_define_method(rb_cContinuation, "call", rb_cont_call, -1);
1819  rb_define_method(rb_cContinuation, "[]", rb_cont_call, -1);
1820  rb_define_global_function("callcc", rb_callcc, 0);
1821 }
1822 
1823 void
1825 {
1826  rb_define_method(rb_cFiber, "transfer", rb_fiber_m_transfer, -1);
1827  rb_define_method(rb_cFiber, "alive?", rb_fiber_alive_p, 0);
1828  rb_define_singleton_method(rb_cFiber, "current", rb_fiber_s_current, 0);
1829 }
1830 
void rb_gc(void)
Definition: gc.c:6727
#define RUBY_VM_CHECK_INTS(th)
Definition: vm_core.h:1627
rb_vm_t * vm
Definition: vm_core.h:788
const VALUE * root_lep
Definition: vm_core.h:760
struct rb_ensure_entry entry
Definition: vm_core.h:733
#define rb_exc_new2
Definition: intern.h:243
void rb_bug(const char *fmt,...)
Definition: error.c:521
#define THREAD_MUST_BE_RUNNING(th)
Definition: cont.c:216
#define GetContPtr(obj, ptr)
Definition: cont.c:206
#define ruby_longjmp(env, val)
Definition: eval_intern.h:60
ruby_tag_type
Definition: vm_core.h:151
VALUE rb_block_to_s(VALUE self, const struct rb_block *block, const char *additional_info)
Definition: proc.c:1248
#define RUBY_TYPED_FREE_IMMEDIATELY
Definition: ruby.h:1138
#define FIBER_RUNNABLE_P(fib)
Definition: cont.c:140
Definition: st.h:79
#define RUBY_VM_SET_INTERRUPT(th)
Definition: vm_core.h:1606
struct rb_trace_arg_struct * trace_arg
Definition: vm_core.h:764
rb_control_frame_t * cfp
Definition: vm_core.h:744
void rb_undef_alloc_func(VALUE)
Definition: vm_method.c:675
void rb_define_singleton_method(VALUE obj, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a singleton method for obj.
Definition: class.c:1716
#define TAG_NONE
Definition: vm_core.h:164
VALUE first_proc
Definition: cont.c:155
#define GetProcPtr(obj, ptr)
Definition: vm_core.h:907
VALUE self
Definition: cont.c:94
VALUE local_storage_recursive_hash_for_trace
Definition: vm_core.h:757
#define FLUSH_REGISTER_WINDOWS
Definition: defines.h:296
#define CLASS_OF(v)
Definition: ruby.h:453
void rb_raise(VALUE exc, const char *fmt,...)
Definition: error.c:2284
#define Qtrue
Definition: ruby.h:437
VALUE rb_fiber_resume(VALUE fibval, int argc, const VALUE *argv)
Definition: cont.c:1590
#define TypedData_Wrap_Struct(klass, data_type, sval)
Definition: ruby.h:1162
void rb_fiber_reset_root_local_storage(VALUE thval)
Definition: cont.c:1612
#define GetFiberPtr(obj, ptr)
Definition: cont.c:209
VALUE data2
Definition: vm_core.h:728
size_t clen
Definition: cont.c:87
VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE passed_block_handler)
Definition: vm.c:1172
enum context_type type
Definition: cont.c:92
#define VM_BLOCK_HANDLER_NONE
Definition: vm_core.h:1135
size_t fiber_machine_stack_size
Definition: vm_core.h:585
VALUE rb_ary_tmp_new(long capa)
Definition: array.c:544
rb_ensure_list_t * ensure_list
Definition: vm_core.h:767
#define STACK_UPPER(x, a, b)
Definition: gc.h:77
void rb_str_set_len(VALUE, long)
Definition: string.c:2627
VALUE rb_fiber_alive_p(VALUE fibval)
Definition: cont.c:1631
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
Definition: thread.c:1595
#define RUBY_MARK_LEAVE(msg)
Definition: gc.h:54
VALUE rb_fiber_current(void)
Definition: cont.c:1419
void rb_define_alloc_func(VALUE, rb_alloc_func_t)
rb_fiber_t * root_fiber
Definition: vm_core.h:852
#define DATA_PTR(dta)
Definition: ruby.h:1106
void rb_gc_mark(VALUE ptr)
Definition: gc.c:4464
#define st_lookup
Definition: regint.h:185
VALUE * ptr
Definition: cont.c:84
rb_jmpbuf_t jmpbuf
Definition: cont.c:110
void rb_define_global_function(const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a global function.
Definition: class.c:1745
struct cont_saved_vm_stack saved_vm_stack
Definition: cont.c:97
Definition: vm_core.h:725
VALUE value
Definition: cont.c:95
void rb_undef_method(VALUE klass, const char *name)
Definition: class.c:1533
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Definition: gc.c:4081
struct rb_context_struct rb_context_t
#define GET_THREAD()
Definition: vm_core.h:1583
VALUE * stack
Definition: cont.c:100
int rb_typeddata_is_kind_of(VALUE obj, const rb_data_type_t *data_type)
Definition: error.c:759
RUBY_SYMBOL_EXPORT_BEGIN typedef unsigned long st_data_t
Definition: st.h:22
#define TH_POP_TAG()
Definition: eval_intern.h:138
#define UNLIKELY(x)
Definition: internal.h:43
enum fiber_status status
Definition: cont.c:157
#define ALLOC_N(type, n)
Definition: ruby.h:1587
#define EXEC_TAG()
Definition: eval_intern.h:201
#define val
RUBY_EXTERN VALUE rb_cObject
Definition: ruby.h:1893
rb_ensure_entry_t * ensure_array
Definition: cont.c:111
size_t st_memsize(const st_table *tab)
Definition: st.c:676
#define FIBER_SUSPENDED_P(fib)
Definition: cont.c:138
size_t fiber_vm_stack_size
Definition: vm_core.h:584
VALUE(* e_proc)(ANYARGS)
Definition: vm_core.h:727
VALUE rb_any_to_s(VALUE)
call-seq: obj.to_s -> string
Definition: object.c:631
#define snprintf
Definition: subst.h:6
#define NIL_P(v)
Definition: ruby.h:451
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
Definition: class.c:646
#define RUBY_MARK_ENTER(msg)
Definition: gc.h:53
VALUE rb_fiber_new(VALUE(*func)(ANYARGS), VALUE obj)
Definition: cont.c:1317
#define FIBER_RESUMED_P(fib)
Definition: cont.c:137
int argc
Definition: ruby.c:187
#define Qfalse
Definition: ruby.h:436
void ruby_Init_Fiber_as_Coroutine(void)
Definition: cont.c:1824
void rb_fiber_start(void)
Definition: cont.c:1325
#define ALLOCA_N(type, n)
Definition: ruby.h:1593
#define MEMCPY(p1, p2, type, n)
Definition: ruby.h:1661
size_t slen
Definition: cont.c:86
#define rb_ary_new4
Definition: intern.h:92
int err
Definition: win32.c:135
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
Definition: class.c:1758
#define ZALLOC(type)
Definition: ruby.h:1590
#define RSTRING_LEN(str)
Definition: ruby.h:971
VALUE rb_yield(VALUE)
Definition: vm_eval.c:973
int transferred
Definition: cont.c:162
#define RARRAY_CONST_PTR(a)
Definition: ruby.h:1021
#define REALLOC_N(var, type, n)
Definition: ruby.h:1591
int errno
#define STACK_DIR_UPPER(a, b)
Definition: gc.h:85
VALUE rb_obj_is_proc(VALUE)
Definition: proc.c:116
void rb_vm_stack_to_heap(rb_thread_t *th)
Definition: vm.c:731
#define VM_ASSERT(expr)
Definition: vm_core.h:53
RUBY_SYMBOL_EXPORT_BEGIN void ruby_Init_Continuation_body(void)
Definition: cont.c:1813
VALUE rb_fiber_yield(int argc, const VALUE *argv)
Definition: cont.c:1606
#define RUBY_SYMBOL_EXPORT_END
Definition: missing.h:49
void ruby_xfree(void *x)
Definition: gc.c:8085
void Init_Cont(void)
Definition: cont.c:1785
unsigned char buf[MIME_BUF_SIZE]
Definition: nkf.c:4309
#define TAG_FATAL
Definition: vm_core.h:172
#define Qnil
Definition: ruby.h:438
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:615
#define FIBER_TERMINATED_P(fib)
Definition: cont.c:139
struct rb_vm_protect_tag * protect_tag
Definition: vm_core.h:747
VALUE rb_eStandardError
Definition: error.c:799
unsigned long VALUE
Definition: ruby.h:85
#define STACK_GROW_DIR_DETECTION
Definition: gc.h:84
const VALUE * rb_vm_proc_local_ep(VALUE proc)
Definition: thread.c:565
#define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_)
Definition: vm_core.h:1686
rb_context_t cont
Definition: cont.c:154
RUBY_JMP_BUF rb_jmpbuf_t
Definition: vm_core.h:690
#define FIBER_CREATED_P(fib)
Definition: cont.c:136
#define RUBY_SYMBOL_EXPORT_BEGIN
Definition: missing.h:48
#define TH_PUSH_TAG(th)
Definition: eval_intern.h:131
rb_control_frame_t * rb_vm_push_frame(rb_execution_context_t *sec, const rb_iseq_t *iseq, VALUE type, VALUE self, VALUE specval, VALUE cref_or_me, const VALUE *pc, VALUE *sp, int local_size, int stack_max)
#define SET_MACHINE_STACK_END(p)
Definition: gc.h:11
struct rb_ensure_list * next
Definition: vm_core.h:732
struct rb_execution_context_struct::@143 machine
#define ruby_setjmp(env)
Definition: eval_intern.h:59
rb_execution_context_t saved_ec
Definition: cont.c:109
#define C(c, s)
enum rb_thread_status status
Definition: vm_core.h:812
#define RUBY_FREE_UNLESS_NULL(ptr)
Definition: gc.h:64
NOINLINE(static VALUE cont_capture(volatile int *volatile stat))
VALUE * stack_src
Definition: cont.c:101
int size
Definition: encoding.c:57
#define f
#define VAR_FROM_MEMORY(var)
Definition: eval_intern.h:155
VALUE rb_block_proc(void)
Definition: proc.c:780
#define st_init_numtable
Definition: regint.h:178
#define ANYARGS
Definition: defines.h:173
#define RUBY_FREE_LEAVE(msg)
Definition: gc.h:56
VALUE marker
Definition: vm_core.h:726
VALUE rb_eRuntimeError
Definition: error.c:800
#define RUBY_FREE_ENTER(msg)
Definition: gc.h:55
struct rb_vm_tag * tag
Definition: vm_core.h:746
#define VAR_INITIALIZED(var)
Definition: eval_intern.h:156
const struct rb_block block
Definition: vm_core.h:911
RUBY_EXTERN char * strerror(int)
Definition: strerror.c:11
VALUE rb_obj_is_fiber(VALUE obj)
Definition: cont.c:425
void rb_execution_context_mark(const rb_execution_context_t *ec)
Definition: vm.c:2371
#define STACK_PAD_SIZE
VALUE rb_proc_new(VALUE(*)(ANYARGS), VALUE)
Definition: proc.c:2649
VALUE rb_str_cat_cstr(VALUE, const char *)
Definition: string.c:2756
#define VM_UNREACHABLE(func)
Definition: vm_core.h:54
struct rb_context_struct::@3 machine
#define TypedData_Make_Struct(klass, type, data_type, sval)
Definition: ruby.h:1175
rb_jmpbuf_t root_jmpbuf
Definition: vm_core.h:853
#define st_insert
Definition: regint.h:184
rb_execution_context_t ec
Definition: vm_core.h:790
RUBY_EXTERN size_t strlcat(char *, const char *, size_t)
Definition: strlcat.c:31
#define RUBY_EVENT_FIBER_SWITCH
Definition: ruby.h:2095
#define st_free_table
Definition: regint.h:188
void rb_fiber_mark_self(const rb_fiber_t *fib)
Definition: cont.c:377
void ruby_register_rollback_func_for_ensure(VALUE(*ensure_func)(ANYARGS), VALUE(*rollback_func)(ANYARGS))
Definition: cont.c:1034
context_type
Definition: cont.c:77
size_t stack_size
Definition: cont.c:102
VALUE rb_vm_make_jump_tag_but_local_jump(int state, VALUE val)
Definition: vm.c:1421
#define stat(path, st)
Definition: win32.h:183
fiber_status
Definition: cont.c:129
#define TAG_RAISE
Definition: vm_core.h:170
NORETURN(NOINLINE(static void cont_restore_0(rb_context_t *, VALUE *)))
#define NULL
Definition: _sdbm.c:102
#define Qundef
Definition: ruby.h:439
struct rb_fiber_struct * prev
Definition: cont.c:156
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Definition: class.c:1515
struct rb_vm_struct::@140 default_params
VALUE thread_value
Definition: cont.c:113
char ** argv
Definition: ruby.c:188
rb_ensure_list_t * ensure_list
Definition: cont.c:112
VALUE rb_fiber_transfer(VALUE fibval, int argc, const VALUE *argv)
Definition: cont.c:1563
#define GET_VM()
Definition: vm_core.h:1582