Ruby 2.5.0dev (2017-10-22 revision 60238)
vm_core.h
1 /**********************************************************************
2 
3  vm_core.h -
4 
5  $Author$
6  created at: 04/01/01 19:41:38 JST
7 
8  Copyright (C) 2004-2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #ifndef RUBY_VM_CORE_H
13 #define RUBY_VM_CORE_H
14 
15 /*
16  * Enable check mode.
17  * 1: enable local assertions.
18  */
19 #ifndef VM_CHECK_MODE
20 #define VM_CHECK_MODE 0
21 #endif
22 
36 #ifndef VMDEBUG
37 #define VMDEBUG 0
38 #endif
39 
40 #if 0
41 #undef VMDEBUG
42 #define VMDEBUG 3
43 #endif
44 
45 #include "ruby_assert.h"
46 
47 #if VM_CHECK_MODE > 0
48 #define VM_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr)
49 
50 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
51 
52 #else
53 #define VM_ASSERT(expr) ((void)0)
54 #define VM_UNREACHABLE(func) UNREACHABLE
55 #endif
56 
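/* Illustrative sketch (added note, not part of the original header): how
 * VM_ASSERT and VM_UNREACHABLE are meant to be used.  Both compile away
 * unless VM_CHECK_MODE > 0 (VM_UNREACHABLE then falls back to UNREACHABLE).
 * The function below is hypothetical and only demonstrates the pattern.
 */
#if 0 /* example only */
static int
example_tag_to_index(int tag)
{
    VM_ASSERT(tag == 0 || tag == 1);      /* checked only when VM_CHECK_MODE > 0 */
    switch (tag) {
      case 0: return 10;
      case 1: return 20;
    }
    VM_UNREACHABLE(example_tag_to_index); /* rb_bug() when VM_CHECK_MODE > 0 */
    return -1;
}
#endif
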
57 #define RUBY_VM_THREAD_MODEL 2
58 
59 #include "ruby/ruby.h"
60 #include "ruby/st.h"
61 
62 #include "node.h"
63 #include "vm_debug.h"
64 #include "vm_opts.h"
65 #include "id.h"
66 #include "method.h"
67 #include "ruby_atomic.h"
68 #include "ccan/list/list.h"
69 
70 #include "ruby/thread_native.h"
71 #if defined(_WIN32)
72 #include "thread_win32.h"
73 #elif defined(HAVE_PTHREAD_H)
74 #include "thread_pthread.h"
75 #endif
76 
77 #ifndef ENABLE_VM_OBJSPACE
78 #ifdef _WIN32
79 /*
80  * TODO: object space independent st_table.
81  * socklist and conlist will be freed in exit_handler(), after object
82  * space destruction.
83  */
84 #define ENABLE_VM_OBJSPACE 0
85 #else
86 #define ENABLE_VM_OBJSPACE 1
87 #endif
88 #endif
89 
90 #include <setjmp.h>
91 #include <signal.h>
92 
93 #ifndef NSIG
94 # define NSIG (_SIGMAX + 1) /* For QNX */
95 #endif
96 
97 #define RUBY_NSIG NSIG
98 
99 #ifdef HAVE_STDARG_PROTOTYPES
100 #include <stdarg.h>
101 #define va_init_list(a,b) va_start((a),(b))
102 #else
103 #include <varargs.h>
104 #define va_init_list(a,b) va_start((a))
105 #endif
106 
107 #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
108 #define USE_SIGALTSTACK
109 #endif
110 
111 /*****************/
112 /* configuration */
113 /*****************/
114 
115 /* gcc ver. check */
116 #if defined(__GNUC__) && __GNUC__ >= 2
117 
118 #if OPT_TOKEN_THREADED_CODE
119 #if OPT_DIRECT_THREADED_CODE
120 #undef OPT_DIRECT_THREADED_CODE
121 #endif
122 #endif
123 
124 #else /* defined(__GNUC__) && __GNUC__ >= 2 */
125 
126 /* disable threaded code options */
127 #if OPT_DIRECT_THREADED_CODE
128 #undef OPT_DIRECT_THREADED_CODE
129 #endif
130 #if OPT_TOKEN_THREADED_CODE
131 #undef OPT_TOKEN_THREADED_CODE
132 #endif
133 #endif
134 
135 #ifdef __native_client__
136 #undef OPT_DIRECT_THREADED_CODE
137 #endif
138 
139 /* call threaded code */
140 #if OPT_CALL_THREADED_CODE
141 #if OPT_DIRECT_THREADED_CODE
142 #undef OPT_DIRECT_THREADED_CODE
143 #endif /* OPT_DIRECT_THREADED_CODE */
144 #if OPT_STACK_CACHING
145 #undef OPT_STACK_CACHING
146 #endif /* OPT_STACK_CACHING */
147 #endif /* OPT_CALL_THREADED_CODE */
148 
149 typedef unsigned long rb_num_t;
150 
162 };
163 
164 #define TAG_NONE RUBY_TAG_NONE
165 #define TAG_RETURN RUBY_TAG_RETURN
166 #define TAG_BREAK RUBY_TAG_BREAK
167 #define TAG_NEXT RUBY_TAG_NEXT
168 #define TAG_RETRY RUBY_TAG_RETRY
169 #define TAG_REDO RUBY_TAG_REDO
170 #define TAG_RAISE RUBY_TAG_RAISE
171 #define TAG_THROW RUBY_TAG_THROW
172 #define TAG_FATAL RUBY_TAG_FATAL
173 #define TAG_MASK RUBY_TAG_MASK
174 
179 };
180 
181 /* forward declarations */
182 struct rb_thread_struct;
184 
185 /* iseq data type */
187 
191  union {
192  size_t index;
194  } ic_value;
195 };
196 
198  struct {
201  } once;
203 };
204 
214 };
215 
216 struct rb_call_info {
217  /* fixed at compile time */
219  unsigned int flag;
221 };
222 
225  VALUE keywords[1];
226 };
227 
229  struct rb_call_info ci;
231 };
232 
236  int argc;
237 };
238 
239 struct rb_call_cache;
240 typedef VALUE (*vm_call_handler)(struct rb_thread_struct *th, struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, const struct rb_call_info *ci, struct rb_call_cache *cc);
241 
243  /* inline cache: keys */
246 
247  /* inline cache: values */
249 
251 
252  union {
253  unsigned int index; /* used by ivar */
254  enum method_missing_reason method_missing_reason; /* used by method_missing */
255  int inc_sp; /* used by cfunc */
256  } aux;
257 };
258 
259 #if 1
260 #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
261 #else
262 #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
263 #endif
264 #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
265 
266 typedef struct rb_iseq_location_struct {
267  VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
268  VALUE base_label; /* String */
269  VALUE label; /* String */
270  VALUE first_lineno; /* TODO: may be unsigned short */
272 
273 #define PATHOBJ_PATH 0
274 #define PATHOBJ_REALPATH 1
275 
276 static inline VALUE
277 pathobj_path(VALUE pathobj)
278 {
279  if (RB_TYPE_P(pathobj, T_STRING)) {
280  return pathobj;
281  }
282  else {
283  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
284  return RARRAY_AREF(pathobj, PATHOBJ_PATH);
285  }
286 }
287 
288 static inline VALUE
289 pathobj_realpath(VALUE pathobj)
290 {
291  if (RB_TYPE_P(pathobj, T_STRING)) {
292  return pathobj;
293  }
294  else {
295  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
296  return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
297  }
298 }
299 
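/* Illustrative sketch (added note, not part of the original header): pathobj
 * is either a frozen String (path == realpath) or a frozen two-element Array
 * [path, realpath]; the two accessors above hide that difference.  The helper
 * below is hypothetical and assumes <stdio.h> for printf.
 */
#if 0 /* example only */
static void
example_dump_location(VALUE pathobj)
{
    VALUE path     = pathobj_path(pathobj);     /* e.g. "foo.rb"           */
    VALUE realpath = pathobj_realpath(pathobj); /* e.g. "/abs/path/foo.rb" */
    printf("%s (%s)\n", RSTRING_PTR(path), RSTRING_PTR(realpath));
}
#endif
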
301  enum iseq_type {
310  ISEQ_TYPE_DEFINED_GUARD
311  } type; /* instruction sequence type */
312 
313  unsigned int iseq_size;
314  const VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
315 
339  struct {
340  struct {
341  unsigned int has_lead : 1;
342  unsigned int has_opt : 1;
343  unsigned int has_rest : 1;
344  unsigned int has_post : 1;
345  unsigned int has_kw : 1;
346  unsigned int has_kwrest : 1;
347  unsigned int has_block : 1;
348 
349  unsigned int ambiguous_param0 : 1; /* {|a|} */
350  } flags;
351 
352  unsigned int size;
353 
354  int lead_num;
355  int opt_num;
358  int post_num;
360 
361  const VALUE *opt_table; /* (opt_num + 1) entries. */
362  /* opt_num and opt_table:
363  *
364  * def foo o1=e1, o2=e2, ..., oN=eN
365  * #=>
366  * # prologue code
367  * A1: e1
368  * A2: e2
369  * ...
370  * AN: eN
371  * AL: body
372  * opt_num = N
373  * opt_table = [A1, A2, ..., AN, AL]
374  */
375 
376  const struct rb_iseq_param_keyword {
377  int num;
380  int rest_start;
381  const ID *table;
383  } *keyword;
384  } param;
385 
387 
388  /* insn info, must be freed */
390 
391  const ID *local_table; /* must free */
392 
393  /* catch table */
394  const struct iseq_catch_table *catch_table;
395 
396  /* for child iseq */
398  struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
399 
401  struct rb_call_info *ci_entries; /* struct rb_call_info ci_entries[ci_size];
402  * struct rb_call_info_with_kwarg cikw_entries[ci_kw_size];
403  * So that:
404  * struct rb_call_info_with_kwarg *cikw_entries = &body->ci_entries[ci_size];
405  */
406  struct rb_call_cache *cc_entries; /* size is ci_size + ci_kw_size */
407 
408  VALUE mark_ary; /* Array: includes operands which should be GC marked */
409 
410  unsigned int local_table_size;
411  unsigned int is_size;
412  unsigned int ci_size;
413  unsigned int ci_kw_size;
414  unsigned int line_info_size;
415  unsigned int stack_max; /* for stack overflow check */
416 };
417 
418 /* T_IMEMO/iseq */
419 /* typedef rb_iseq_t is in method.h */
424 
425  union { /* 4, 5 words */
426  struct iseq_compile_data *compile_data; /* used at compile time */
427 
428  struct {
430  int index;
431  } loader;
432  } aux;
433 };
434 
435 #ifndef USE_LAZY_LOAD
436 #define USE_LAZY_LOAD 0
437 #endif
438 
439 #if USE_LAZY_LOAD
440 const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
441 #endif
442 
443 static inline const rb_iseq_t *
444 rb_iseq_check(const rb_iseq_t *iseq)
445 {
446 #if USE_LAZY_LOAD
447  if (iseq->body == NULL) {
448  rb_iseq_complete((rb_iseq_t *)iseq);
449  }
450 #endif
451  return iseq;
452 }
453 
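/* Illustrative sketch (added note, not part of the original header): with
 * USE_LAZY_LOAD an iseq restored from a binary dump may still have
 * body == NULL, so callers go through rb_iseq_check() before touching the
 * body.  The accessor below is hypothetical and only shows the pattern.
 */
#if 0 /* example only */
static unsigned int
example_iseq_size(const rb_iseq_t *iseq)
{
    /* completes the lazy load (if any), then reads the body */
    return rb_iseq_check(iseq)->body->iseq_size;
}
#endif
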
461 };
462 
489 
491 };
492 
493 #define GetVMPtr(obj, ptr) \
494  GetCoreDataFromValue((obj), rb_vm_t, (ptr))
495 
496 struct rb_vm_struct;
497 typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
498 
499 typedef struct rb_at_exit_list {
503 
504 struct rb_objspace;
505 struct rb_objspace *rb_objspace_alloc(void);
506 void rb_objspace_free(struct rb_objspace *);
507 
508 typedef struct rb_hook_list_struct {
513 
514 typedef struct rb_vm_struct {
515  VALUE self;
516 
518  rb_nativethread_lock_t thread_destruct_lock;
519 
522 
523  struct list_head waiting_fds; /* <=> struct waiting_fd */
524  struct list_head living_threads;
527 
528  unsigned int running: 1;
529  unsigned int thread_abort_on_exception: 1;
530  unsigned int thread_report_on_exception: 1;
532  volatile int sleeper;
533 
534  /* object management */
536  const VALUE special_exceptions[ruby_special_error_count];
537 
538  /* load */
548 
549  /* signal */
550  struct {
552  unsigned char safe[RUBY_NSIG];
553  } trap_list;
554 
555  /* hook */
557 
558  /* relation table of ensure - rollback for callcc */
560 
561  /* postponed_job */
564 
566 
567  VALUE verbose, debug, orig_progname, progname;
570 
572 
574 
576 
579 
580  /* params */
581  struct { /* sizes in bytes */
586  } default_params;
587 
588  short redefined_flag[BOP_LAST_];
589 } rb_vm_t;
590 
591 /* default values */
592 
593 #define RUBY_VM_SIZE_ALIGN 4096
594 
595 #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
596 #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
597 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
598 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
599 
600 #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
601 #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
602 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
603 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
604 
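/* Added note (not part of the original header): the "512 KB or 1024 KB"
 * comments above depend on sizeof(VALUE).  For example, for
 * RUBY_VM_THREAD_VM_STACK_SIZE = 128 * 1024 * sizeof(VALUE):
 *   32-bit platform, sizeof(VALUE) == 4:  128 * 1024 * 4 =  512 KB
 *   64-bit platform, sizeof(VALUE) == 8:  128 * 1024 * 8 = 1024 KB
 * The sizes actually in effect live in rb_vm_t::default_params above and can
 * be tuned via the RUBY_THREAD_VM_STACK_SIZE family of environment variables
 * (handled outside this header).
 */
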
605 /* optimize insn */
606 #define INTEGER_REDEFINED_OP_FLAG (1 << 0)
607 #define FLOAT_REDEFINED_OP_FLAG (1 << 1)
608 #define STRING_REDEFINED_OP_FLAG (1 << 2)
609 #define ARRAY_REDEFINED_OP_FLAG (1 << 3)
610 #define HASH_REDEFINED_OP_FLAG (1 << 4)
611 /* #define BIGNUM_REDEFINED_OP_FLAG (1 << 5) */
612 #define SYMBOL_REDEFINED_OP_FLAG (1 << 6)
613 #define TIME_REDEFINED_OP_FLAG (1 << 7)
614 #define REGEXP_REDEFINED_OP_FLAG (1 << 8)
615 #define NIL_REDEFINED_OP_FLAG (1 << 9)
616 #define TRUE_REDEFINED_OP_FLAG (1 << 10)
617 #define FALSE_REDEFINED_OP_FLAG (1 << 11)
618 
619 #define BASIC_OP_UNREDEFINED_P(op, klass) (LIKELY((GET_VM()->redefined_flag[(op)]&(klass)) == 0))
620 
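/* Illustrative sketch (added note, not part of the original header): an
 * optimized instruction pairs one of the BOP_* ids (from the
 * ruby_basic_operators enum, elided in this listing) with a
 * *_REDEFINED_OP_FLAG class bit.  BOP_PLUS is assumed here for illustration.
 */
#if 0 /* example only */
static void
example_opt_plus_guard(void)
{
    if (BASIC_OP_UNREDEFINED_P(BOP_PLUS, INTEGER_REDEFINED_OP_FLAG)) {
        /* Integer#+ has not been redefined: the fast opt_plus path is safe */
    }
    else {
        /* someone redefined Integer#+: fall back to a normal method call */
    }
}
#endif
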
621 #ifndef VM_DEBUG_BP_CHECK
622 #define VM_DEBUG_BP_CHECK 0
623 #endif
624 
625 #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
626 #define VM_DEBUG_VERIFY_METHOD_CACHE (VM_DEBUG_MODE != 0)
627 #endif
628 
630  VALUE self;
631  const VALUE *ep;
632  union {
633  const rb_iseq_t *iseq;
634  const struct vm_ifunc *ifunc;
636  } code;
637 };
638 
644 };
645 
651 };
652 
653 struct rb_block {
654  union {
655  struct rb_captured_block captured;
658  } as;
659  enum rb_block_type type;
660 };
661 
662 typedef struct rb_control_frame_struct {
663  const VALUE *pc; /* cfp[0] */
664  VALUE *sp; /* cfp[1] */
665  const rb_iseq_t *iseq; /* cfp[2] */
666  VALUE self; /* cfp[3] / block[0] */
667  const VALUE *ep; /* cfp[4] / block[1] */
668  const void *block_code; /* cfp[5] / block[2] */ /* iseq or ifunc */
669 
670 #if VM_DEBUG_BP_CHECK
671  VALUE *bp_check; /* cfp[6] */
672 #endif
674 
676 
677 static inline struct rb_thread_struct *
678 rb_thread_ptr(VALUE thval)
679 {
680  return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
681 }
682 
688 };
689 
690 typedef RUBY_JMP_BUF rb_jmpbuf_t;
691 
692 /*
693  the members which are written in TH_PUSH_TAG() should be placed at
694  the beginning and the end, so that the entire region is accessible.
695 */
696 struct rb_vm_tag {
699  rb_jmpbuf_t buf;
700  struct rb_vm_tag *prev;
702 };
703 
704 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
705 STATIC_ASSERT(rb_vm_tag_buf_end,
706  offsetof(struct rb_vm_tag, buf) + sizeof(rb_jmpbuf_t) <
707  sizeof(struct rb_vm_tag));
708 
711 };
712 
715  void *arg;
716 };
717 
718 struct rb_mutex_struct;
719 
720 typedef struct rb_thread_list_struct{
724 
725 typedef struct rb_ensure_entry {
727  VALUE (*e_proc)(ANYARGS);
730 
731 typedef struct rb_ensure_list {
733  struct rb_ensure_entry entry;
735 
736 typedef char rb_thread_id_string_t[sizeof(rb_nativethread_id_t) * 2 + 3];
737 
739 
741  /* execution information */
742  VALUE *vm_stack; /* must free, must mark */
743  size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
745 
746  struct rb_vm_tag *tag;
750 
751  /* temporary place of errinfo */
753 
754  /* storage (ec (fiber) local) */
758 
759  /* eval env */
760  const VALUE *root_lep;
762 
763  /* trace information */
765 
766  /* ensure & callcc */
767  rb_ensure_list_t *ensure_list;
768 
770 
771  /* for GC */
772  struct {
776 #ifdef __ia64
777  VALUE *register_stack_start;
778  VALUE *register_stack_end;
779  size_t register_stack_maxsize;
780 #endif
781  jmp_buf regs;
782  } machine;
784 
785 typedef struct rb_thread_struct {
786  struct list_node vmlt_node;
787  VALUE self;
789 
791 
792  VALUE last_status; /* $? */
793 
794  /* for rb_iterate */
796 
797  /* for bmethod */
799 
800  /* for cfunc */
802 
803  /* for load(true) */
806 
807  /* thread control */
808  rb_nativethread_id_t thread_id;
809 #ifdef NON_SCALAR_THREAD_ID
810  rb_thread_id_string_t thread_id_string;
811 #endif
812  enum rb_thread_status status;
813  int to_kill;
814  int priority;
815 
818 
821 
822  /* temporary place of retval on OPT_CALL_THREADED_CODE */
823 #if OPT_CALL_THREADED_CODE
824  VALUE retval;
825 #endif
826 
827  /* async errinfo queue */
831 
833  unsigned long interrupt_mask;
834  rb_nativethread_lock_t interrupt_lock;
835  struct rb_unblock_callback unblock;
838 
840 
843  VALUE (*first_func)(ANYARGS);
844 
845  /* statistics data for profiler */
847 
848  /* tracer */
850 
851  /* fiber */
853  rb_jmpbuf_t root_jmpbuf;
854 
855  /* misc */
857  unsigned int abort_on_exception: 1;
858  unsigned int report_on_exception: 1;
859 #ifdef USE_SIGALTSTACK
860  void *altstack;
861 #endif
862  uint32_t running_time_us; /* 12500..800000 */
864 } rb_thread_t;
865 
866 typedef enum {
870  /* 0x03..0x06 is reserved */
873 
874 #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
875 #define VM_DEFINECLASS_FLAG_SCOPED 0x08
876 #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
877 #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
878 #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
879  ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
880 
881 /* iseq.c */
883 
884 /* node -> iseq */
885 rb_iseq_t *rb_iseq_new (NODE *node, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum iseq_type);
886 rb_iseq_t *rb_iseq_new_top (NODE *node, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
887 rb_iseq_t *rb_iseq_new_main (NODE *node, VALUE path, VALUE realpath, const rb_iseq_t *parent);
888 rb_iseq_t *rb_iseq_new_with_opt(NODE* node, VALUE name, VALUE path, VALUE realpath, VALUE first_lineno,
889  const rb_iseq_t *parent, enum iseq_type, const rb_compile_option_t*);
890 
891 /* src -> iseq */
892 rb_iseq_t *rb_iseq_compile(VALUE src, VALUE file, VALUE line);
893 rb_iseq_t *rb_iseq_compile_on_base(VALUE src, VALUE file, VALUE line, const struct rb_block *base_block);
894 rb_iseq_t *rb_iseq_compile_with_option(VALUE src, VALUE file, VALUE realpath, VALUE line, const struct rb_block *base_block, VALUE opt);
895 
896 VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
897 int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
898 const char *ruby_node_name(int node);
899 
900 VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
901 
906 
907 #define GetProcPtr(obj, ptr) \
908  GetCoreDataFromValue((obj), rb_proc_t, (ptr))
909 
910 typedef struct {
911  const struct rb_block block;
912  int8_t safe_level; /* 0..1 */
913  int8_t is_from_method; /* bool */
914  int8_t is_lambda; /* bool */
915 } rb_proc_t;
916 
917 typedef struct {
918  VALUE flags; /* imemo header */
919  const rb_iseq_t *iseq;
920  const VALUE *ep;
921  const VALUE *env;
922  unsigned int env_size;
923 } rb_env_t;
924 
926 
927 #define GetBindingPtr(obj, ptr) \
928  GetCoreDataFromValue((obj), rb_binding_t, (ptr))
929 
930 typedef struct {
931  const struct rb_block block;
932  const VALUE pathobj;
933  unsigned short first_lineno;
934 } rb_binding_t;
935 
936 /* used by compile time and send insn */
937 
942 };
943 
944 #define VM_CHECKMATCH_TYPE_MASK 0x03
945 #define VM_CHECKMATCH_ARRAY 0x04
946 
948  VM_CALL_ARGS_SPLAT_bit, /* m(*args) */
949  VM_CALL_ARGS_BLOCKARG_bit, /* m(&block) */
950  VM_CALL_FCALL_bit, /* m(...) */
952  VM_CALL_ARGS_SIMPLE_bit, /* (ci->flag & (SPLAT|BLOCKARG)) && blockiseq == NULL && ci->kw_arg == NULL */
953  VM_CALL_BLOCKISEQ_bit, /* has blockiseq */
954  VM_CALL_KWARG_bit, /* has kwarg */
955  VM_CALL_KW_SPLAT_bit, /* m(**opts) */
956  VM_CALL_TAILCALL_bit, /* located at tail position */
957  VM_CALL_SUPER_bit, /* super */
958  VM_CALL_OPT_SEND_bit, /* internal flag */
960 };
961 
962 #define VM_CALL_ARGS_SPLAT (0x01 << VM_CALL_ARGS_SPLAT_bit)
963 #define VM_CALL_ARGS_BLOCKARG (0x01 << VM_CALL_ARGS_BLOCKARG_bit)
964 #define VM_CALL_FCALL (0x01 << VM_CALL_FCALL_bit)
965 #define VM_CALL_VCALL (0x01 << VM_CALL_VCALL_bit)
966 #define VM_CALL_ARGS_SIMPLE (0x01 << VM_CALL_ARGS_SIMPLE_bit)
967 #define VM_CALL_BLOCKISEQ (0x01 << VM_CALL_BLOCKISEQ_bit)
968 #define VM_CALL_KWARG (0x01 << VM_CALL_KWARG_bit)
969 #define VM_CALL_KW_SPLAT (0x01 << VM_CALL_KW_SPLAT_bit)
970 #define VM_CALL_TAILCALL (0x01 << VM_CALL_TAILCALL_bit)
971 #define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit)
972 #define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit)
973 
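/* Illustrative sketch (added note, not part of the original header): a call
 * site's flag word is a bitwise OR of the VM_CALL_* values above.  Roughly,
 * a Ruby call written as  foo(*args, &blk)  would carry:
 */
#if 0 /* example only */
static unsigned int
example_call_flag(void)
{
    return VM_CALL_FCALL            /* receiver-less call: foo(...) */
         | VM_CALL_ARGS_SPLAT       /* *args                        */
         | VM_CALL_ARGS_BLOCKARG;   /* &blk                         */
}
#endif
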
978 };
979 
981  VM_SVAR_LASTLINE = 0, /* $_ */
982  VM_SVAR_BACKREF = 1, /* $~ */
983 
985  VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
986 };
987 
988 /* inline cache */
989 typedef struct iseq_inline_cache_entry *IC;
990 typedef struct rb_call_info *CALL_INFO;
991 typedef struct rb_call_cache *CALL_CACHE;
992 
993 void rb_vm_change_state(void);
994 
995 typedef VALUE CDHASH;
996 
997 #ifndef FUNC_FASTCALL
998 #define FUNC_FASTCALL(x) x
999 #endif
1000 
1001 typedef rb_control_frame_t *
1003 
1004 #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1005 #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1006 
1007 #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1008 #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1009 #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1010 
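/* Illustrative sketch (added note, not part of the original header): these
 * macros keep a small tag in the low bits of an aligned pointer so the GC can
 * distinguish a guarded pointer (e.g. a prev-ep stored in ep[-1]) from an
 * ordinary VALUE.  The roundtrip below is hypothetical.
 */
#if 0 /* example only */
static void
example_guarded_ptr_roundtrip(const VALUE *prev_ep)
{
    VALUE specval = GC_GUARDED_PTR(prev_ep);                   /* pointer | 0x01        */
    VM_ASSERT(GC_GUARDED_PTR_P(specval));                      /* low bit marks the tag */
    VM_ASSERT(GC_GUARDED_PTR_REF(specval) == (void *)prev_ep); /* tag masked off again  */
}
#endif
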
1011 enum {
1012  /* Frame/Environment flag bits:
1013  * MMMM MMMM MMMM MMMM ____ __FF FFFF EEEX (LSB)
1014  *
1015  * X : tag for GC marking (it makes the word look like a Fixnum)
1016  * EEE : 3 bits Env flags
1017  * FF..: 6 bits Frame flags
1018  * MM..: 16 bits frame magic (to check frame corruption)
1019  */
1020 
1021  /* frame types */
1023  VM_FRAME_MAGIC_BLOCK = 0x22220001,
1024  VM_FRAME_MAGIC_CLASS = 0x33330001,
1025  VM_FRAME_MAGIC_TOP = 0x44440001,
1026  VM_FRAME_MAGIC_CFUNC = 0x55550001,
1027  VM_FRAME_MAGIC_IFUNC = 0x66660001,
1028  VM_FRAME_MAGIC_EVAL = 0x77770001,
1030  VM_FRAME_MAGIC_DUMMY = 0x99990001,
1031 
1032  VM_FRAME_MAGIC_MASK = 0xffff0001,
1033 
1034  /* frame flag */
1040 
1041  /* env flag */
1045 };
1046 
1047 #define VM_ENV_DATA_SIZE ( 3)
1048 
1049 #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1050 #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1051 #define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1052 #define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1053 #define VM_ENV_DATA_INDEX_ENV_PROC ( 2) /* ep[ 2] */
1054 
1055 #define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1056 
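/* Added note (not part of the original header): layout of an environment as
 * seen from its ep, following the VM_ENV_DATA_INDEX_* constants above (n
 * local variables sit just below the bookkeeping slots):
 *
 *   ep[-(n+2)] .. ep[-3]  locals (ep[-3] == ep[VM_ENV_INDEX_LAST_LVAR])
 *   ep[-2]  VM_ENV_DATA_INDEX_ME_CREF    method entry / cref
 *   ep[-1]  VM_ENV_DATA_INDEX_SPECVAL    block handler or guarded prev-ep
 *   ep[ 0]  VM_ENV_DATA_INDEX_FLAGS      frame magic + frame/env flag bits
 *   ep[ 1]  VM_ENV_DATA_INDEX_ENV        env object  (escaped envs; see VM_ENV_ENVVAL below)
 *   ep[ 2]  VM_ENV_DATA_INDEX_ENV_PROC   proc object (see VM_ENV_PROCVAL below)
 */
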
1057 static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1058 
1059 static inline void
1060 VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1061 {
1062  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1063  VM_ASSERT(FIXNUM_P(flags));
1064  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1065 }
1066 
1067 static inline void
1068 VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1069 {
1070  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1071  VM_ASSERT(FIXNUM_P(flags));
1072  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1073 }
1074 
1075 static inline unsigned long
1076 VM_ENV_FLAGS(const VALUE *ep, long flag)
1077 {
1078  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1079  VM_ASSERT(FIXNUM_P(flags));
1080  return flags & flag;
1081 }
1082 
1083 static inline unsigned long
1084 VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1085 {
1086  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1087 }
1088 
1089 static inline int
1090 VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1091 {
1092  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1093 }
1094 
1095 static inline int
1096 VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1097 {
1098  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1099 }
1100 
1101 static inline int
1102 VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1103 {
1104  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1105 }
1106 
1107 static inline int
1108 rb_obj_is_iseq(VALUE iseq)
1109 {
1110  return imemo_type_p(iseq, imemo_iseq);
1111 }
1112 
1113 #if VM_CHECK_MODE > 0
1114 #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1115 #endif
1116 
1117 static inline int
1118 VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1119 {
1120  int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1121  VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p);
1122  return cframe_p;
1123 }
1124 
1125 static inline int
1126 VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1127 {
1128  return !VM_FRAME_CFRAME_P(cfp);
1129 }
1130 
1131 #define RUBYVM_CFUNC_FRAME_P(cfp) \
1132  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1133 
1134 #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1135 #define VM_BLOCK_HANDLER_NONE 0
1136 
1137 static inline int
1138 VM_ENV_LOCAL_P(const VALUE *ep)
1139 {
1140  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1141 }
1142 
1143 static inline const VALUE *
1144 VM_ENV_PREV_EP(const VALUE *ep)
1145 {
1146  VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1148 }
1149 
1150 static inline VALUE
1151 VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1152 {
1153  VM_ASSERT(VM_ENV_LOCAL_P(ep));
1154  return ep[VM_ENV_DATA_INDEX_SPECVAL];
1155 }
1156 
1157 #if VM_CHECK_MODE > 0
1158 int rb_vm_ep_in_heap_p(const VALUE *ep);
1159 #endif
1160 
1161 static inline int
1162 VM_ENV_ESCAPED_P(const VALUE *ep)
1163 {
1164  VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1165  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1166 }
1167 
1168 #if VM_CHECK_MODE > 0
1169 static inline int
1170 vm_assert_env(VALUE obj)
1171 {
1172  VM_ASSERT(imemo_type_p(obj, imemo_env));
1173  return 1;
1174 }
1175 #endif
1176 
1177 static inline VALUE
1178 VM_ENV_ENVVAL(const VALUE *ep)
1179 {
1180  VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1181  VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1182  VM_ASSERT(vm_assert_env(envval));
1183  return envval;
1184 }
1185 
1186 static inline const rb_env_t *
1187 VM_ENV_ENVVAL_PTR(const VALUE *ep)
1188 {
1189  return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1190 }
1191 
1192 static inline VALUE
1193 VM_ENV_PROCVAL(const VALUE *ep)
1194 {
1195  VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1196  VM_ASSERT(VM_ENV_LOCAL_P(ep));
1197  VM_ASSERT(VM_ENV_BLOCK_HANDLER(ep) != VM_BLOCK_HANDLER_NONE);
1198 
1199  return ep[VM_ENV_DATA_INDEX_ENV_PROC];
1200 }
1201 
1202 static inline const rb_env_t *
1203 vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1204 {
1205  rb_env_t *env = (rb_env_t *)rb_imemo_new(imemo_env, (VALUE)env_ep, (VALUE)env_body, 0, (VALUE)iseq);
1206  env->env_size = env_size;
1207  env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1208  return env;
1209 }
1210 
1211 static inline void
1212 VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1213 {
1214  *((VALUE *)ptr) = v;
1215 }
1216 
1217 static inline void
1218 VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1219 {
1220  VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1221  VM_FORCE_WRITE(ptr, special_const_value);
1222 }
1223 
1224 static inline void
1225 VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1226 {
1227  VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1228  VM_FORCE_WRITE(&ep[index], v);
1229 }
1230 
1231 const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1232 const VALUE *rb_vm_proc_local_ep(VALUE proc);
1233 void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1234 void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1235 
1237 
1238 #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1239 #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1240 #define RUBY_VM_END_CONTROL_FRAME(th) \
1241  ((rb_control_frame_t *)((th)->ec.vm_stack + (th)->ec.vm_stack_size))
1242 #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1243  ((void *)(ecfp) > (void *)(cfp))
1244 #define RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp) \
1245  (!RUBY_VM_VALID_CONTROL_FRAME_P((cfp), RUBY_VM_END_CONTROL_FRAME(th)))
1246 
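/* Illustrative sketch (added note, not part of the original header): control
 * frames occupy the top of ec->vm_stack and grow downwards in memory, which
 * is why "previous" is cfp + 1 and "next" is cfp - 1 above.  The walker below
 * is hypothetical and only shows how the macros compose.
 */
#if 0 /* example only */
static void
example_walk_frames(const rb_thread_t *th)
{
    const rb_control_frame_t *cfp = th->ec.cfp;
    while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(th, cfp)) {
        /* ... inspect cfp here ... */
        cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
    }
}
#endif
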
1247 static inline int
1248 VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1249 {
1250  if ((block_handler & 0x03) == 0x01) {
1251 #if VM_CHECK_MODE > 0
1252  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1253  VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1254 #endif
1255  return 1;
1256  }
1257  else {
1258  return 0;
1259  }
1260 }
1261 
1262 static inline VALUE
1263 VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1264 {
1265  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1266  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1267  return block_handler;
1268 }
1269 
1270 static inline const struct rb_captured_block *
1271 VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1272 {
1273  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1274  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1275  return captured;
1276 }
1277 
1278 static inline int
1279 VM_BH_IFUNC_P(VALUE block_handler)
1280 {
1281  if ((block_handler & 0x03) == 0x03) {
1282 #if VM_CHECK_MODE > 0
1283  struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1284  VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1285 #endif
1286  return 1;
1287  }
1288  else {
1289  return 0;
1290  }
1291 }
1292 
1293 static inline VALUE
1294 VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1295 {
1296  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1297  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1298  return block_handler;
1299 }
1300 
1301 static inline const struct rb_captured_block *
1302 VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1303 {
1304  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1305  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1306  return captured;
1307 }
1308 
1309 static inline const struct rb_captured_block *
1310 VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1311 {
1312  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1313  VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1314  return captured;
1315 }
1316 
1317 static inline enum rb_block_handler_type
1318 vm_block_handler_type(VALUE block_handler)
1319 {
1320  if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1321  return block_handler_type_iseq;
1322  }
1323  else if (VM_BH_IFUNC_P(block_handler)) {
1324  return block_handler_type_ifunc;
1325  }
1326  else if (SYMBOL_P(block_handler)) {
1328  }
1329  else {
1330  VM_ASSERT(rb_obj_is_proc(block_handler));
1331  return block_handler_type_proc;
1332  }
1333 }
1334 
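/* Added note (not part of the original header): a block handler stored in
 * ep[VM_ENV_DATA_INDEX_SPECVAL] is one of four encodings, distinguished by
 * the predicates above:
 *
 *   low bits 01    tagged pointer to a captured iseq block  (VM_BH_ISEQ_BLOCK_P)
 *   low bits 11    tagged pointer to a captured ifunc block (VM_BH_IFUNC_P)
 *   Symbol VALUE   block created from a Symbol (e.g. &:to_s)
 *   Proc VALUE     block created from a Proc object
 *
 * and VM_BLOCK_HANDLER_NONE (0) means "no block given".
 */
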
1335 static inline void
1336 vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1337 {
1338  VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1339  (vm_block_handler_type(block_handler), 1));
1340 }
1341 
1342 static inline enum rb_block_type
1343 vm_block_type(const struct rb_block *block)
1344 {
1345 #if VM_CHECK_MODE > 0
1346  switch (block->type) {
1347  case block_type_iseq:
1348  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1349  break;
1350  case block_type_ifunc:
1351  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1352  break;
1353  case block_type_symbol:
1354  VM_ASSERT(SYMBOL_P(block->as.symbol));
1355  break;
1356  case block_type_proc:
1357  VM_ASSERT(rb_obj_is_proc(block->as.proc));
1358  break;
1359  }
1360 #endif
1361  return block->type;
1362 }
1363 
1364 static inline void
1365 vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1366 {
1367  struct rb_block *mb = (struct rb_block *)block;
1368  mb->type = type;
1369 }
1370 
1371 static inline const struct rb_block *
1372 vm_proc_block(VALUE procval)
1373 {
1374  VM_ASSERT(rb_obj_is_proc(procval));
1375  return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1376 }
1377 
1378 static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1379 static inline const VALUE *vm_block_ep(const struct rb_block *block);
1380 
1381 static inline const rb_iseq_t *
1382 vm_proc_iseq(VALUE procval)
1383 {
1384  return vm_block_iseq(vm_proc_block(procval));
1385 }
1386 
1387 static inline const VALUE *
1388 vm_proc_ep(VALUE procval)
1389 {
1390  return vm_block_ep(vm_proc_block(procval));
1391 }
1392 
1393 static inline const rb_iseq_t *
1394 vm_block_iseq(const struct rb_block *block)
1395 {
1396  switch (vm_block_type(block)) {
1397  case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1398  case block_type_proc: return vm_proc_iseq(block->as.proc);
1399  case block_type_ifunc:
1400  case block_type_symbol: return NULL;
1401  }
1402  VM_UNREACHABLE(vm_block_iseq);
1403  return NULL;
1404 }
1405 
1406 static inline const VALUE *
1407 vm_block_ep(const struct rb_block *block)
1408 {
1409  switch (vm_block_type(block)) {
1410  case block_type_iseq:
1411  case block_type_ifunc: return block->as.captured.ep;
1412  case block_type_proc: return vm_proc_ep(block->as.proc);
1413  case block_type_symbol: return NULL;
1414  }
1415  VM_UNREACHABLE(vm_block_ep);
1416  return NULL;
1417 }
1418 
1419 static inline VALUE
1420 vm_block_self(const struct rb_block *block)
1421 {
1422  switch (vm_block_type(block)) {
1423  case block_type_iseq:
1424  case block_type_ifunc:
1425  return block->as.captured.self;
1426  case block_type_proc:
1427  return vm_block_self(vm_proc_block(block->as.proc));
1428  case block_type_symbol:
1429  return Qundef;
1430  }
1431  VM_UNREACHABLE(vm_block_self);
1432  return Qundef;
1433 }
1434 
1435 static inline VALUE
1436 VM_BH_TO_SYMBOL(VALUE block_handler)
1437 {
1438  VM_ASSERT(SYMBOL_P(block_handler));
1439  return block_handler;
1440 }
1441 
1442 static inline VALUE
1443 VM_BH_FROM_SYMBOL(VALUE symbol)
1444 {
1445  VM_ASSERT(SYMBOL_P(symbol));
1446  return symbol;
1447 }
1448 
1449 static inline VALUE
1450 VM_BH_TO_PROC(VALUE block_handler)
1451 {
1452  VM_ASSERT(rb_obj_is_proc(block_handler));
1453  return block_handler;
1454 }
1455 
1456 static inline VALUE
1457 VM_BH_FROM_PROC(VALUE procval)
1458 {
1459  VM_ASSERT(rb_obj_is_proc(procval));
1460  return procval;
1461 }
1462 
1463 /* VM related object allocate functions */
1464 VALUE rb_thread_alloc(VALUE klass);
1465 VALUE rb_proc_alloc(VALUE klass);
1467 
1468 /* for debug */
1470 extern void rb_vmdebug_debug_print_pre(rb_thread_t *th, rb_control_frame_t *cfp, const VALUE *_pc);
1472 
1473 #define SDR() rb_vmdebug_stack_dump_raw(GET_THREAD(), GET_THREAD()->ec.cfp)
1474 #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_THREAD(), (cfp))
1475 void rb_vm_bugreport(const void *);
1476 NORETURN(void rb_bug_context(const void *, const char *fmt, ...));
1477 
1478 /* functions about thread/vm execution */
1480 VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1481 VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1482 VALUE rb_iseq_path(const rb_iseq_t *iseq);
1483 VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1485 
1486 VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1487 void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1488 
1489 int rb_thread_method_id_and_class(rb_thread_t *th, ID *idp, ID *called_idp, VALUE *klassp);
1490 
1491 VALUE rb_vm_invoke_proc(rb_thread_t *th, rb_proc_t *proc, int argc, const VALUE *argv, VALUE block_handler);
1492 VALUE rb_vm_make_proc_lambda(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1493 VALUE rb_vm_make_proc(rb_thread_t *th, const struct rb_captured_block *captured, VALUE klass);
1496 const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1497 const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1499 void rb_vm_gvl_destroy(rb_vm_t *vm);
1500 VALUE rb_vm_call(rb_thread_t *th, VALUE recv, VALUE id, int argc,
1501  const VALUE *argv, const rb_callable_method_entry_t *me);
1502 void rb_vm_pop_frame(rb_thread_t *th);
1503 
1504 void rb_thread_start_timer_thread(void);
1505 void rb_thread_stop_timer_thread(void);
1506 void rb_thread_reset_timer_thread(void);
1508 
1509 static inline void
1510 rb_vm_living_threads_init(rb_vm_t *vm)
1511 {
1512  list_head_init(&vm->waiting_fds);
1513  list_head_init(&vm->living_threads);
1514  vm->living_thread_num = 0;
1515 }
1516 
1517 static inline void
1518 rb_vm_living_threads_insert(rb_vm_t *vm, rb_thread_t *th)
1519 {
1520  list_add_tail(&vm->living_threads, &th->vmlt_node);
1521  vm->living_thread_num++;
1522 }
1523 
1524 static inline void
1525 rb_vm_living_threads_remove(rb_vm_t *vm, rb_thread_t *th)
1526 {
1527  list_del(&th->vmlt_node);
1528  vm->living_thread_num--;
1529 }
1530 
1531 typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1535 VALUE rb_name_err_mesg_new(VALUE mesg, VALUE recv, VALUE method);
1538 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1540 
1541 void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1542 
1543 #define rb_vm_register_special_exception(sp, e, m) \
1544  rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1545 
1547 
1549 
1550 void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1551 
1553 
1554 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1555 
1556 #define RUBY_CONST_ASSERT(expr) (1/!!(expr)) /* expr must be a compile-time constant */
1557 #define VM_STACK_OVERFLOWED_P(cfp, sp, margin) \
1558  (!RUBY_CONST_ASSERT(sizeof(*(sp)) == sizeof(VALUE)) || \
1559  !RUBY_CONST_ASSERT(sizeof(*(cfp)) == sizeof(rb_control_frame_t)) || \
1560  ((rb_control_frame_t *)((sp) + (margin)) + 1) >= (cfp))
1561 #define WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) \
1562  if (LIKELY(!VM_STACK_OVERFLOWED_P(cfp, sp, margin))) {(void)0;} else /* overflowed */
1563 #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) \
1564  WHEN_VM_STACK_OVERFLOWED(cfp, sp, margin) vm_stackoverflow()
1565 #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1566  WHEN_VM_STACK_OVERFLOWED(cfp, (cfp)->sp, margin) vm_stackoverflow()
1567 
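/* Illustrative sketch (added note, not part of the original header): the
 * overflow check asks whether pushing `margin` more VALUEs from sp would run
 * into the control-frame region growing down towards it; the RUBY_CONST_ASSERT
 * terms only verify element sizes at compile time.  vm_stackoverflow() itself
 * is defined outside this header; the function below is hypothetical.
 */
#if 0 /* example only: reserve room before pushing two values */
static void
example_push_two_nils(rb_thread_t *th)
{
    rb_control_frame_t *cfp = th->ec.cfp;
    CHECK_VM_STACK_OVERFLOW(cfp, 2);
    *cfp->sp++ = Qnil;
    *cfp->sp++ = Qnil;
}
#endif
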
1568 VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1569 
1570 /* for thread */
1571 
1572 #if RUBY_VM_THREAD_MODEL == 2
1573 
1575 
1577 extern rb_vm_t *ruby_current_vm;
1579 
1581 
1582 #define GET_VM() ruby_current_vm
1583 #define GET_THREAD() ruby_current_thread
1584 
1585 #define rb_thread_set_current_raw(th) (void)(ruby_current_thread = (th))
1586 #define rb_thread_set_current(th) do { \
1587  if ((th)->vm->running_thread != (th)) { \
1588  (th)->running_time_us = 0; \
1589  } \
1590  rb_thread_set_current_raw(th); \
1591  (th)->vm->running_thread = (th); \
1592 } while (0)
1593 
1594 #else
1595 #error "unsupported thread model"
1596 #endif
1597 
1598 enum {
1603 };
1604 
1605 #define RUBY_VM_SET_TIMER_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, TIMER_INTERRUPT_MASK)
1606 #define RUBY_VM_SET_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, PENDING_INTERRUPT_MASK)
1607 #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
1608 #define RUBY_VM_SET_TRAP_INTERRUPT(th) ATOMIC_OR((th)->interrupt_flag, TRAP_INTERRUPT_MASK)
1609 #define RUBY_VM_INTERRUPTED(th) ((th)->interrupt_flag & ~(th)->interrupt_mask & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
1610 #define RUBY_VM_INTERRUPTED_ANY(th) ((th)->interrupt_flag & ~(th)->interrupt_mask)
1611 
1613 int rb_signal_buff_size(void);
1614 void rb_signal_exec(rb_thread_t *th, int sig);
1616 void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
1624 void rb_threadptr_error_print(rb_thread_t *volatile th, volatile VALUE errinfo);
1626 
1627 #define RUBY_VM_CHECK_INTS(th) ruby_vm_check_ints(th)
1628 static inline void
1629 ruby_vm_check_ints(rb_thread_t *th)
1630 {
1631  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(th))) {
1633  }
1634 }
1635 
1636 /* tracer */
1641  VALUE self;
1646 
1648 
1649  /* calc from cfp */
1650  int lineno;
1652 };
1653 
1654 void rb_threadptr_exec_event_hooks(struct rb_trace_arg_struct *trace_arg);
1656 
1657 #define EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
1658  const rb_event_flag_t flag_arg_ = (flag_); \
1659  if (UNLIKELY(ruby_vm_event_flags & (flag_arg_))) { \
1660  /* defer evaluating the other arguments */ \
1661  ruby_exec_event_hook_orig(th_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
1662  } \
1663 } while (0)
1664 
1665 static inline void
1666 ruby_exec_event_hook_orig(rb_thread_t *const th, const rb_event_flag_t flag,
1667  VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
1668 {
1669  if ((th->event_hooks.events | th->vm->event_hooks.events) & flag) {
1670  struct rb_trace_arg_struct trace_arg;
1671  trace_arg.event = flag;
1672  trace_arg.th = th;
1673  trace_arg.cfp = th->ec.cfp;
1674  trace_arg.self = self;
1675  trace_arg.id = id;
1676  trace_arg.called_id = called_id;
1677  trace_arg.klass = klass;
1678  trace_arg.data = data;
1679  trace_arg.path = Qundef;
1680  trace_arg.klass_solved = 0;
1681  if (pop_p) rb_threadptr_exec_event_hooks_and_pop_frame(&trace_arg);
1682  else rb_threadptr_exec_event_hooks(&trace_arg);
1683  }
1684 }
1685 
1686 #define EXEC_EVENT_HOOK(th_, flag_, self_, id_, called_id_, klass_, data_) \
1687  EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, called_id_, klass_, data_, 0)
1688 
1689 #define EXEC_EVENT_HOOK_AND_POP_FRAME(th_, flag_, self_, id_, called_id_, klass_, data_) \
1690  EXEC_EVENT_HOOK_ORIG(th_, flag_, self_, id_, called_id_, klass_, data_, 1)
1691 
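/* Illustrative sketch (added note, not part of the original header):
 * EXEC_EVENT_HOOK is the entry point for firing TracePoint/set_trace_func
 * events; the outer macro tests the global event mask first so the arguments
 * are evaluated only when some hook may care.  A hypothetical call site for a
 * `line` event might look like:
 */
#if 0 /* example only */
static void
example_fire_line_event(rb_thread_t *th, const rb_control_frame_t *cfp)
{
    EXEC_EVENT_HOOK(th, RUBY_EVENT_LINE, cfp->self, 0, 0, 0 /* klass */, Qundef);
}
#endif
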
1693 
1695 
1696 extern VALUE rb_get_coverages(void);
1697 extern void rb_set_coverages(VALUE, int);
1698 extern void rb_reset_coverages(void);
1699 
1701 
1703 
1704 #endif /* RUBY_VM_CORE_H */