14 #define rb_data_object_alloc rb_data_object_alloc 15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc 34 #include <sys/types.h> 38 #undef rb_data_object_wrap 40 #ifndef HAVE_MALLOC_USABLE_SIZE 42 # define HAVE_MALLOC_USABLE_SIZE 43 # define malloc_usable_size(a) _msize(a) 44 # elif defined HAVE_MALLOC_SIZE 45 # define HAVE_MALLOC_USABLE_SIZE 46 # define malloc_usable_size(a) malloc_size(a) 49 #ifdef HAVE_MALLOC_USABLE_SIZE 52 # elif defined(HAVE_MALLOC_NP_H) 53 # include <malloc_np.h> 54 # elif defined(HAVE_MALLOC_MALLOC_H) 55 # include <malloc/malloc.h> 60 __has_feature(address_sanitizer) || \ 61 defined(__SANITIZE_ADDRESS__) 62 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS \ 63 __attribute__((no_address_safety_analysis)) \ 64 __attribute__((noinline)) 66 #define ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS 69 #ifdef HAVE_SYS_TIME_H 73 #ifdef HAVE_SYS_RESOURCE_H 74 #include <sys/resource.h> 76 #if defined(__native_client__) && defined(NACL_NEWLIB) 78 # undef HAVE_POSIX_MEMALIGN 83 #if defined _WIN32 || defined __CYGWIN__ 85 #elif defined(HAVE_POSIX_MEMALIGN) 86 #elif defined(HAVE_MEMALIGN) 90 #define rb_setjmp(env) RUBY_SETJMP(env) 91 #define rb_jmp_buf rb_jmpbuf_t 93 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL 105 #ifndef GC_HEAP_INIT_SLOTS 106 #define GC_HEAP_INIT_SLOTS 10000 108 #ifndef GC_HEAP_FREE_SLOTS 109 #define GC_HEAP_FREE_SLOTS 4096 111 #ifndef GC_HEAP_GROWTH_FACTOR 112 #define GC_HEAP_GROWTH_FACTOR 1.8 114 #ifndef GC_HEAP_GROWTH_MAX_SLOTS 115 #define GC_HEAP_GROWTH_MAX_SLOTS 0 117 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR 118 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0 121 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO 122 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20 124 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO 125 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40 127 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO 128 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65 131 #ifndef GC_MALLOC_LIMIT_MIN 132 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 ) 134 #ifndef GC_MALLOC_LIMIT_MAX 135 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 ) 137 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR 138 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4 141 #ifndef GC_OLDMALLOC_LIMIT_MIN 142 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 ) 144 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 145 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2 147 #ifndef GC_OLDMALLOC_LIMIT_MAX 148 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 ) 151 #ifndef PRINT_MEASURE_LINE 152 #define PRINT_MEASURE_LINE 0 154 #ifndef PRINT_ENTER_EXIT_TICK 155 #define PRINT_ENTER_EXIT_TICK 0 157 #ifndef PRINT_ROOT_TICKS 158 #define PRINT_ROOT_TICKS 0 161 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS) 225 #define RGENGC_DEBUG -1 227 #define RGENGC_DEBUG 0 230 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER) 231 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level)) 233 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level)) 245 #ifndef RGENGC_CHECK_MODE 246 #define RGENGC_CHECK_MODE 0 249 #if RGENGC_CHECK_MODE > 0 250 #define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr) 252 #define GC_ASSERT(expr) ((void)0) 261 #ifndef RGENGC_OLD_NEWOBJ_CHECK 262 #define RGENGC_OLD_NEWOBJ_CHECK 0 270 #ifndef RGENGC_PROFILE 271 #define RGENGC_PROFILE 0 280 #ifndef RGENGC_ESTIMATE_OLDMALLOC 281 #define RGENGC_ESTIMATE_OLDMALLOC 1 287 #ifndef RGENGC_FORCE_MAJOR_GC 288 #define RGENGC_FORCE_MAJOR_GC 0 296 #define RGENGC_DEBUG 0 297 #ifdef RGENGC_CHECK_MODE 298 #undef 
RGENGC_CHECK_MODE 300 #define RGENGC_CHECK_MODE 0 301 #define RGENGC_PROFILE 0 302 #define RGENGC_ESTIMATE_OLDMALLOC 0 303 #define RGENGC_FORCE_MAJOR_GC 0 307 #ifndef GC_PROFILE_MORE_DETAIL 308 #define GC_PROFILE_MORE_DETAIL 0 310 #ifndef GC_PROFILE_DETAIL_MEMORY 311 #define GC_PROFILE_DETAIL_MEMORY 0 313 #ifndef GC_ENABLE_INCREMENTAL_MARK 314 #define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC 316 #ifndef GC_ENABLE_LAZY_SWEEP 317 #define GC_ENABLE_LAZY_SWEEP 1 319 #ifndef CALC_EXACT_MALLOC_SIZE 320 #define CALC_EXACT_MALLOC_SIZE 0 322 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0 323 #ifndef MALLOC_ALLOCATED_SIZE 324 #define MALLOC_ALLOCATED_SIZE 0 327 #define MALLOC_ALLOCATED_SIZE 0 329 #ifndef MALLOC_ALLOCATED_SIZE_CHECK 330 #define MALLOC_ALLOCATED_SIZE_CHECK 0 333 #ifndef GC_DEBUG_STRESS_TO_CLASS 334 #define GC_DEBUG_STRESS_TO_CLASS 0 337 #ifndef RGENGC_OBJ_INFO 338 #define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE) 348 #if RGENGC_ESTIMATE_OLDMALLOC 375 #if GC_PROFILE_MORE_DETAIL 377 double gc_sweep_time;
379 size_t heap_use_pages;
380 size_t heap_live_objects;
381 size_t heap_free_objects;
383 size_t allocate_increase;
384 size_t allocate_limit;
387 size_t removing_objects;
388 size_t empty_objects;
389 #if GC_PROFILE_DETAIL_MEMORY 395 #if MALLOC_ALLOCATED_SIZE 396 size_t allocated_size;
399 #if RGENGC_PROFILE > 0 401 size_t remembered_normal_objects;
402 size_t remembered_shady_objects;
406 #if defined(_MSC_VER) || defined(__CYGWIN__) 407 #pragma pack(push, 1) 457 #if defined(_MSC_VER) || defined(__CYGWIN__) 482 #define STACK_CHUNK_SIZE 500 505 #if GC_ENABLE_INCREMENTAL_MARK 522 #if MALLOC_ALLOCATED_SIZE 523 size_t allocated_size;
539 #if GC_ENABLE_INCREMENTAL_MARK 556 void (*mark_func)(
VALUE v,
void *data);
585 #if GC_PROFILE_MORE_DETAIL 593 #if RGENGC_PROFILE > 0 594 size_t total_generated_normal_object_count;
595 size_t total_generated_shady_object_count;
596 size_t total_shade_operation_count;
597 size_t total_promoted_count;
598 size_t total_remembered_normal_object_count;
599 size_t total_remembered_shady_object_count;
601 #if RGENGC_PROFILE >= 2 602 size_t generated_normal_object_count_types[
RUBY_T_MASK];
603 size_t generated_shady_object_count_types[
RUBY_T_MASK];
606 size_t remembered_normal_object_count_types[
RUBY_T_MASK];
607 size_t remembered_shady_object_count_types[
RUBY_T_MASK];
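/* Note: under RGENGC_PROFILE >= 2 the counters above become per-type histograms
 * sized RUBY_T_MASK; presumably they are indexed by BUILTIN_TYPE(obj) when an
 * object is generated, shaded, promoted, or remembered, so the profile can be
 * broken down by T_OBJECT, T_STRING, T_ARRAY, and so on. */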
637 #if RGENGC_ESTIMATE_OLDMALLOC 642 #if RGENGC_CHECK_MODE >= 2 647 #if GC_ENABLE_INCREMENTAL_MARK 655 #if GC_DEBUG_STRESS_TO_CLASS 661 #ifndef HEAP_PAGE_ALIGN_LOG 663 #define HEAP_PAGE_ALIGN_LOG 14 665 #define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod)) 705 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK))) 706 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header) 707 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page) 709 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE)) 710 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH ) 711 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1)) 712 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p)) 715 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p)) 716 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p)) 717 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p)) 720 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0]) 722 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0]) 723 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0]) 724 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0]) 728 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE 729 #define rb_objspace (*rb_objspace_of(GET_VM())) 730 #define rb_objspace_of(vm) ((vm)->objspace) 733 #define rb_objspace_of(vm) (&rb_objspace) 736 #define ruby_initial_gc_stress gc_params.gc_stress 740 #define malloc_limit objspace->malloc_params.limit 741 #define malloc_increase objspace->malloc_params.increase 742 #define malloc_allocated_size objspace->malloc_params.allocated_size 743 #define heap_pages_sorted objspace->heap_pages.sorted 744 #define heap_allocated_pages objspace->heap_pages.allocated_pages 745 #define heap_pages_sorted_length objspace->heap_pages.sorted_length 746 #define heap_pages_lomem objspace->heap_pages.range[0] 747 #define heap_pages_himem objspace->heap_pages.range[1] 748 #define heap_allocatable_pages objspace->heap_pages.allocatable_pages 749 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages 750 #define heap_pages_final_slots objspace->heap_pages.final_slots 751 #define heap_pages_deferred_final objspace->heap_pages.deferred_final 752 #define heap_eden (&objspace->eden_heap) 753 #define heap_tomb (&objspace->tomb_heap) 754 #define dont_gc objspace->flags.dont_gc 755 #define during_gc objspace->flags.during_gc 756 #define finalizing objspace->atomic_flags.finalizing 757 #define finalizer_table objspace->finalizer_table 758 #define global_list objspace->global_list 759 #define ruby_gc_stressful objspace->flags.gc_stressful 760 #define ruby_gc_stress_mode objspace->gc_stress_mode 761 #if GC_DEBUG_STRESS_TO_CLASS 762 #define stress_to_class objspace->stress_to_class 764 #define stress_to_class 0 768 gc_mode_verify(
enum gc_mode mode)
770 #if RGENGC_CHECK_MODE > 0 777 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
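/* gc_mode_verify() is a debug-only guard: with RGENGC_CHECK_MODE > 0 it rb_bug()s
 * on any value outside the known gc_mode enum. The gc_mode()/gc_mode_set()
 * macros below route every read and write of objspace->flags.mode through it,
 * so an invalid phase transition fails loudly instead of being stored silently. */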
783 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode) 784 #define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode)) 786 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking) 787 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping) 789 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE) 791 #define is_full_marking(objspace) TRUE 793 #if GC_ENABLE_INCREMENTAL_MARK 794 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE) 796 #define is_incremental_marking(objspace) FALSE 798 #if GC_ENABLE_INCREMENTAL_MARK 799 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE) 801 #define will_be_incremental_marking(objspace) FALSE 803 #define has_sweeping_pages(heap) ((heap)->sweep_pages != 0) 804 #define is_lazy_sweeping(heap) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(heap)) 806 #if SIZEOF_LONG == SIZEOF_VOIDP 807 # define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG) 808 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) 809 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP 810 # define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2) 811 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \ 812 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1)) 814 # error not supported 817 #define RANY(o) ((RVALUE*)(o)) 822 void (*dfree)(
void *);
826 #define RZOMBIE(o) ((struct RZombie *)(o)) 828 #define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory] 839 static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
842 static void negative_size_allocation_error(const char *);
843 static void *aligned_malloc(size_t, size_t);
844 static void aligned_free(void *);
850 static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_mark, int immediate_sweep, int reason);
852 static int gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, const unsigned int immediate_sweep, int reason);
854 static inline void gc_enter(rb_objspace_t *objspace, const char *event);
855 static inline void gc_exit(rb_objspace_t *objspace, const char *event);
857 static void gc_marks(rb_objspace_t *objspace, int full_mark);
858 static void gc_marks_start(rb_objspace_t *objspace, int full);
861 #if GC_ENABLE_INCREMENTAL_MARK 862 static void gc_marks_step(rb_objspace_t *objspace, int slots);
871 #if GC_ENABLE_LAZY_SWEEP 885 static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);
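/* Forward declarations for the collector core: garbage_collect() is the outer
 * entry point and gc_start() drives a single collection, with gc_marks() and
 * gc_marks_start() covering the mark phase and gc_marks_step() its incremental
 * variant. gc_enter()/gc_exit() bracket each GC event (see their use with
 * "gc_start", "marks_continue", and "sweep_continue" further below). */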
890 static void shrink_stack_chunk_cache(mark_stack_t *stack);
892 static size_t obj_memsize_of(VALUE obj, int use_all_types);
893 static VALUE gc_verify_internal_consistency(VALUE self);
899 static double getrusage_time(void);
900 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
903 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
905 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
906 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
910 #define gc_prof_record(objspace) (objspace)->profile.current_record 911 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record) 913 #ifdef HAVE_VA_ARGS_MACRO 914 # define gc_report(level, objspace, ...) \ 915 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__) 917 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body 920 static const char *obj_info(
VALUE obj);
922 #define PUSH_MARK_FUNC_DATA(v) do { \ 923 struct mark_func_data_struct *prev_mark_func_data = objspace->mark_func_data; \ 924 objspace->mark_func_data = (v); 926 #define POP_MARK_FUNC_DATA() objspace->mark_func_data = prev_mark_func_data;} while (0) 946 #if defined(__GNUC__) && defined(__i386__) 947 typedef unsigned long long tick_t;
948 #define PRItick "llu" 952 unsigned long long int x;
953 __asm__ __volatile__ ("rdtsc" : "=A" (x));
957 #elif defined(__GNUC__) && defined(__x86_64__) 958 typedef unsigned long long tick_t;
959 #define PRItick "llu" 961 static __inline__ tick_t
964 unsigned long hi, lo;
965 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
966 return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
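/* tick() is a cheap per-platform timestamp used only by the MEASURE_LINE and
 * PRINT_ROOT_TICKS style instrumentation (see USE_TICK_T above). On x86 and
 * x86_64 it reads the CPU time-stamp counter with RDTSC and combines EDX:EAX
 * into one 64-bit value; other builds fall back to __builtin_ppc_get_timebase(),
 * clock(), or getrusage_time(). */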
969 #elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0) 970 typedef unsigned long long tick_t;
971 #define PRItick "llu" 973 static __inline__ tick_t
976 unsigned long long val = __builtin_ppc_get_timebase();
980 #elif defined(_WIN32) && defined(_MSC_VER) 982 typedef unsigned __int64 tick_t;
983 #define PRItick "llu" 992 typedef clock_t tick_t;
993 #define PRItick "llu" 1002 #elif TICK_TYPE == 2 1003 typedef double tick_t;
1004 #define PRItick "4.9f" 1006 static inline tick_t
1009 return getrusage_time();
1012 #error "choose tick type" 1015 #define MEASURE_LINE(expr) do { \ 1016 volatile tick_t start_time = tick(); \ 1017 volatile tick_t end_time; \ 1019 end_time = tick(); \ 1020 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \ 1024 #define MEASURE_LINE(expr) expr 1027 #define FL_CHECK2(name, x, pred) \ 1028 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \ 1029 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred)) 1030 #define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0) 1031 #define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f)) 1032 #define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f)) 1034 #define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj)) 1035 #define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj)) 1038 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj)) 1039 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj)) 1040 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj)) 1042 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj)) 1043 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj)) 1044 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj)) 1046 #define RVALUE_OLD_AGE 3 1047 #define RVALUE_AGE_SHIFT 5 1063 #if RGENGC_CHECK_MODE == 0 1065 check_rvalue_consistency(
const VALUE obj)
1071 check_rvalue_consistency(
const VALUE obj)
1076 rb_bug("check_rvalue_consistency: %p is a special const.", (void *)obj);
1078 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1079 rb_bug("check_rvalue_consistency: %p is not a Ruby object.", (void *)obj);
1086 const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1096 if (age > 0 && wb_unprotected_bit) {
1097 rb_bug("check_rvalue_consistency: %s is not WB protected, but age is %d > 0.", obj_info(obj), age);
1100 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1101 rb_bug("check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.", obj_info(obj));
1105 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1106 rb_bug("check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.", obj_info(obj), age);
1109 rb_bug("check_rvalue_consistency: %s is remembered, but not old (age: %d).", obj_info(obj), age);
1121 if (!is_marking(objspace) && !mark_bit)
rb_bug("check_rvalue_consistency: %s is marking, but not marked.", obj_info(obj));
1129 RVALUE_MARKED(
VALUE obj)
1131 check_rvalue_consistency(obj);
1137 RVALUE_WB_UNPROTECTED(
VALUE obj)
1139 check_rvalue_consistency(obj);
1144 RVALUE_MARKING(
VALUE obj)
1146 check_rvalue_consistency(obj);
1151 RVALUE_REMEMBERED(
VALUE obj)
1153 check_rvalue_consistency(obj);
1158 RVALUE_UNCOLLECTIBLE(
VALUE obj)
1160 check_rvalue_consistency(obj);
1165 RVALUE_OLD_P_RAW(
VALUE obj)
1168 return (RBASIC(obj)->flags & promoted) == promoted;
1172 RVALUE_OLD_P(
VALUE obj)
1174 check_rvalue_consistency(obj);
1175 return RVALUE_OLD_P_RAW(obj);
1178 #if RGENGC_CHECK_MODE || GC_DEBUG 1180 RVALUE_AGE(
VALUE obj)
1182 check_rvalue_consistency(obj);
1183 return RVALUE_FLAGS_AGE(
RBASIC(obj)->flags);
1193 #if RGENGC_PROFILE >= 2 1194 objspace->
profile.total_promoted_count++;
1202 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace,
GET_HEAP_PAGE(obj), obj);
1206 RVALUE_FLAGS_AGE_SET(
VALUE flags,
int age)
1218 int age = RVALUE_FLAGS_AGE(flags);
1221 rb_bug(
"RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1225 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1228 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1230 check_rvalue_consistency(obj);
1237 check_rvalue_consistency(obj);
1241 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1243 check_rvalue_consistency(obj);
1250 check_rvalue_consistency(obj);
1255 check_rvalue_consistency(obj);
1261 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(
RBASIC(obj)->flags, 0);
1268 check_rvalue_consistency(obj);
1275 RVALUE_DEMOTE_RAW(objspace, obj);
1277 if (RVALUE_MARKED(obj)) {
1281 check_rvalue_consistency(obj);
1285 RVALUE_AGE_RESET_RAW(
VALUE obj)
1287 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(
RBASIC(obj)->flags, 0);
1291 RVALUE_AGE_RESET(
VALUE obj)
1293 check_rvalue_consistency(obj);
1296 RVALUE_AGE_RESET_RAW(obj);
1297 check_rvalue_consistency(obj);
1301 RVALUE_BLACK_P(
VALUE obj)
1303 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1308 RVALUE_GREY_P(
VALUE obj)
1310 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1315 RVALUE_WHITE_P(
VALUE obj)
1317 return RVALUE_MARKED(obj) ==
FALSE;
1329 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE 1346 rb_bug(
"lazy sweeping underway when freeing object space");
1366 heap_allocated_pages = 0;
1376 #if !(defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE) 1377 if (objspace == &rb_objspace)
return;
1383 heap_pages_expand_sorted_to(
rb_objspace_t *objspace,
size_t next_length)
1388 gc_report(3, objspace,
"heap_pages_expand_sorted: next_length: %d, size: %d\n", (
int)next_length, (
int)size);
1418 heap_pages_expand_sorted_to(objspace, next_length);
1426 heap_allocatable_pages_set(
rb_objspace_t *objspace,
size_t s)
1429 heap_pages_expand_sorted(objspace);
1442 rb_bug(
"heap_page_add_freeobj: %p is not rvalue.", p);
1445 gc_report(3, objspace,
"heap_page_add_freeobj: add %p to freelist\n", (
void *)obj);
1457 #if GC_ENABLE_INCREMENTAL_MARK 1504 heap_unlink_page(objspace,
heap_tomb, page);
1505 heap_page_free(objspace, page);
1529 if (page_body == 0) {
1536 aligned_free(page_body);
1547 end = start + limit;
1555 mid = (lo +
hi) / 2;
1557 if (mid_page->
start < start) {
1560 else if (mid_page->
start > start) {
1594 for (p = start; p != end; p++) {
1595 gc_report(3, objspace,
"assign_heap_page: %p is added to freelist\n", p);
1596 heap_page_add_freeobj(objspace, page, (
VALUE)p);
1610 heap_unlink_page(objspace,
heap_tomb, page);
1625 const char *method =
"recycle";
1629 page = heap_page_resurrect(objspace);
1632 page = heap_page_allocate(objspace);
1633 method =
"allocate";
1635 if (0) fprintf(stderr,
"heap_page_create: %s - %p, heap_allocated_pages: %d, heap_allocated_pages: %d, tomb->total_pages: %d\n",
1654 struct heap_page *page = heap_page_create(objspace);
1655 heap_add_page(objspace, heap, page);
1656 heap_add_freepage(objspace, heap, page);
1664 heap_allocatable_pages_set(objspace, add);
1666 for (i = 0; i <
add; i++) {
1667 heap_assign_page(objspace, heap);
1680 if (goal_ratio == 0.0) {
1687 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1690 if (f < 1.0) f = 1.1;
1692 next_used = (size_t)(f * used);
1697 " G(%1.2f), f(%1.2f)," 1699 free_slots, total_slots, free_slots/(
double)total_slots,
1700 goal_ratio, f, used, next_used);
1706 if (next_used > max_used) next_used = max_used;
1709 return next_used - used;
1713 heap_set_increment(
rb_objspace_t *objspace,
size_t additional_pages)
1716 size_t next_used_limit = used + additional_pages;
1720 heap_allocatable_pages_set(objspace, next_used_limit - used);
1729 gc_report(1, objspace,
"heap_increment: heap_pages_sorted_length: %d, heap_pages_inc: %d, heap->total_pages: %d\n",
1732 GC_ASSERT(heap_allocatable_pages +
heap_eden->total_pages <= heap_pages_sorted_length);
1735 heap_assign_page(objspace, heap);
1746 #if GC_ENABLE_LAZY_SWEEP 1748 gc_sweep_continue(objspace, heap);
1751 #if GC_ENABLE_INCREMENTAL_MARK 1753 gc_marks_continue(objspace, heap);
1771 heap_prepare(objspace, heap);
1805 p = heap_get_freeobj_from_next_freepage(objspace, heap);
1824 #define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook) 1825 #define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event)) 1827 #define gc_event_hook(objspace, event, data) do { \ 1828 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \ 1829 gc_event_hook_body(GET_THREAD(), (objspace), (event), (data)); \ 1842 RANY(obj)->as.values.v1 = v1;
1843 RANY(obj)->as.values.v2 = v2;
1844 RANY(obj)->as.values.v3 = v3;
1846 #if RGENGC_CHECK_MODE 1853 if (RVALUE_AGE(obj) != 2)
rb_bug(
"newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
1856 if (RVALUE_AGE(obj) > 0)
rb_bug(
"newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
1858 if (rgengc_remembered(objspace, (
VALUE)obj))
rb_bug(
"newobj: %s is remembered.", obj_info(obj));
1869 objspace->
profile.total_generated_normal_object_count++;
1870 #if RGENGC_PROFILE >= 2 1875 objspace->
profile.total_generated_shady_object_count++;
1876 #if RGENGC_PROFILE >= 2 1889 gc_report(5, objspace,
"newobj: %s\n", obj_info(obj));
1891 #if RGENGC_OLD_NEWOBJ_CHECK > 0 1896 flags & FL_WB_PROTECTED &&
1898 if (--newobj_cnt == 0) {
1901 gc_mark_set(objspace, obj);
1902 RVALUE_AGE_SET_OLD(objspace, obj);
1909 check_rvalue_consistency(obj);
1922 rb_bug(
"object allocation during garbage collection phase");
1932 obj = heap_get_freeobj(objspace,
heap_eden);
1933 newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
1944 return newobj_slowpath(klass, flags, v1, v2, v3, objspace,
TRUE);
1950 return newobj_slowpath(klass, flags, v1, v2, v3, objspace,
FALSE);
1959 #if GC_DEBUG_STRESS_TO_CLASS 1963 for (i = 0; i <
cnt; ++i) {
1972 return newobj_init(klass, flags, v1, v2, v3, wb_protected, objspace, obj);
1975 return wb_protected ?
1976 newobj_slowpath_wb_protected(klass, flags, v1, v2, v3, objspace) :
1977 newobj_slowpath_wb_unprotected(klass, flags, v1, v2, v3, objspace);
1985 return newobj_of(klass, flags, 0, 0, 0,
FALSE);
1992 return newobj_of(klass, flags, 0, 0, 0,
TRUE);
2023 return newobj_of(v0, flags, v1, v2, v3,
TRUE);
2031 fprintf(stderr,
"memo %p (type: %d) @ %s:%d\n", memo,
imemo_type(memo), file, line);
2043 #undef rb_data_object_alloc 2064 #undef rb_data_typed_object_alloc 2107 register size_t hi,
lo, mid;
2116 mid = (lo +
hi) / 2;
2118 if (page->
start <= p) {
2132 free_const_entry_i(
VALUE value,
void *data)
2176 rb_bug(
"obj_free() called for broken object");
2188 #if RGENGC_CHECK_MODE 2189 #define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj)) 2190 CHECK(RVALUE_WB_UNPROTECTED);
2191 CHECK(RVALUE_MARKED);
2192 CHECK(RVALUE_MARKING);
2193 CHECK(RVALUE_UNCOLLECTIBLE);
2201 RANY(obj)->as.object.as.heap.ivptr) {
2202 xfree(
RANY(obj)->as.object.as.heap.ivptr);
2232 if (
RANY(obj)->as.klass.ptr)
2243 if (
RANY(obj)->as.hash.ntbl) {
2248 if (
RANY(obj)->as.regexp.ptr) {
2254 int free_immediately =
FALSE;
2255 void (*dfree)(
void *);
2260 dfree =
RANY(obj)->as.typeddata.type->function.dfree;
2261 if (0 && free_immediately == 0) {
2263 fprintf(stderr,
"not immediate -> %s\n",
RANY(obj)->as.typeddata.type->wrap_struct_name);
2267 dfree =
RANY(obj)->as.data.dfree;
2274 else if (free_immediately) {
2278 make_zombie(objspace, obj, dfree, data);
2285 if (
RANY(obj)->as.match.rmatch) {
2286 struct rmatch *rm =
RANY(obj)->as.match.rmatch;
2294 if (
RANY(obj)->as.file.fptr) {
2295 make_io_zombie(objspace, obj);
2335 RANY(obj)->as.rstruct.as.heap.ptr) {
2336 xfree((
void *)
RANY(obj)->as.rstruct.as.heap.ptr);
2372 make_zombie(objspace, obj, 0, 0);
2387 #if RGENGC_ESTIMATE_OLDMALLOC 2394 #ifdef USE_SIGALTSTACK 2398 void *tmp = th->altstack;
2416 objspace_each_objects(
VALUE arg)
2432 pstart = page->
start;
2444 incremental_enable(
void)
2493 int prev_dont_incremental = objspace->flags.dont_incremental;
2496 objspace->flags.dont_incremental =
TRUE;
2501 if (prev_dont_incremental) {
2502 objspace_each_objects((
VALUE)&args);
2516 objspace_each_objects((
VALUE)&args);
2525 internal_object_p(
VALUE obj)
2554 return internal_object_p(obj);
2558 os_obj_of_i(
void *vstart,
void *vend,
size_t stride,
void *data)
2563 for (; p != pend; p++) {
2565 if (!internal_object_p(v)) {
2635 return os_obj_of(of);
2664 should_be_callable(
VALUE block)
2672 should_be_finalizable(
VALUE obj)
2693 define_final(
int argc,
VALUE *argv,
VALUE os)
2698 should_be_finalizable(obj);
2703 should_be_callable(block);
2706 return define_final0(obj, block);
2722 table = (
VALUE)data;
2730 for (i = 0; i <
len; i++, ptr++) {
2750 should_be_finalizable(obj);
2751 should_be_callable(block);
2752 return define_final0(obj, block);
2764 table = (
VALUE)data;
2794 #define RESTORE_FINALIZER() (\ 2795 th->ec.cfp = saved.cfp, \ 2796 rb_set_safe_level_force(saved.safe), \ 2797 rb_set_errinfo(saved.errinfo)) 2802 saved.cfp = th->
ec.
cfp;
2810 for (i = saved.finished;
2812 saved.finished = ++i) {
2813 run_single_final(
RARRAY_AREF(table, i), saved.objid);
2816 #undef RESTORE_FINALIZER 2830 run_finalizer(objspace, zombie, (
VALUE)table);
2841 run_final(objspace, zombie);
2843 RZOMBIE(zombie)->basic.flags = 0;
2847 heap_page_add_freeobj(objspace,
GET_HEAP_PAGE(zombie), zombie);
2851 zombie = next_zombie;
2861 finalize_list(objspace, zombie);
2866 gc_finalize_deferred(
void *dmy)
2870 finalize_deferred(objspace);
2878 gc_finalize_deferred(0);
2885 rb_bug(
"gc_finalize_deferred_register: can't register finalizer.");
2910 #if RGENGC_CHECK_MODE >= 2 2911 gc_verify_internal_consistency(
Qnil);
2913 rb_objspace_call_finalizer(&rb_objspace);
2927 finalize_deferred(objspace);
2941 run_finalizer(objspace, curr->
obj, curr->
table);
2952 gc_enter(objspace,
"rb_objspace_call_finalizer");
2964 p->as.free.flags = 0;
2966 RDATA(p)->dfree =
RANY(p)->as.typeddata.type->function.dfree;
2971 else if (
RANY(p)->as.data.dfree) {
2972 make_zombie(objspace, (
VALUE)p,
RANY(p)->as.data.dfree,
RANY(p)->as.data.data);
2976 if (
RANY(p)->as.file.fptr) {
2977 make_io_zombie(objspace, (
VALUE)p);
2985 gc_exit(objspace,
"rb_objspace_call_finalizer");
3000 if (!is_pointer_to_heap(objspace, (
void *)ptr))
return FALSE;
3016 if (heap_is_swept_object(objspace,
heap_eden, ptr)) {
3029 is_swept_object(objspace, ptr) ||
3048 if (!is_garbage_object(objspace, ptr)) {
3059 if (rb_special_const_p(obj))
return FALSE;
3060 check_rvalue_consistency(obj);
3068 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
3075 return is_garbage_object(objspace, obj);
3094 #if SIZEOF_LONG == SIZEOF_VOIDP 3095 #define NUM2PTR(x) NUM2ULONG(x) 3096 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP 3097 #define NUM2PTR(x) NUM2ULL(x) 3113 if ((ptr %
sizeof(
RVALUE)) == (4 << 2)) {
3120 if (!is_id_value(objspace, ptr)) {
3123 if (!is_live_object(objspace, ptr)) {
3126 if (
RBASIC(ptr)->klass == 0) {
3192 #if SIZEOF_LONG == SIZEOF_VOIDP 3207 obj_memsize_of(
VALUE obj,
int use_all_types)
3222 ROBJECT(obj)->as.heap.ivptr) {
3238 if (
RCLASS(obj)->ptr->iv_tbl) {
3241 if (
RCLASS(obj)->ptr->const_tbl) {
3261 if (
RHASH(obj)->ntbl) {
3278 size +=
sizeof(
struct rmatch);
3282 if (
RFILE(obj)->fptr) {
3290 size +=
RANY(obj)->as.imemo.alloc.cnt *
sizeof(
VALUE);
3319 rb_bug(
"objspace/memsize_of(): unknown data type 0x%x(%p)",
3323 return size +
sizeof(
RVALUE);
3329 return obj_memsize_of(obj,
TRUE);
3377 count_objects(
int argc,
VALUE *argv,
VALUE os)
3391 for (i = 0; i <=
T_MASK; i++) {
3400 for (;p < pend; p++) {
3420 for (i = 0; i <=
T_MASK; i++) {
3423 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break; 3451 default: type =
INT2NUM(i);
break;
3485 gc_setup_mark_bits(
struct heap_page *page)
3500 int empty_slots = 0, freed_slots = 0,
final_slots = 0;
3501 RVALUE *p, *pend,*offset;
3504 gc_report(2, objspace,
"page_sweep: start.\n");
3524 gc_report(2, objspace,
"page_sweep: free %s\n", obj_info((
VALUE)p));
3525 #if USE_RGENGC && RGENGC_CHECK_MODE 3527 if (RVALUE_OLD_P((
VALUE)p))
rb_bug(
"page_sweep: %s - old while minor GC.", obj_info((
VALUE)p));
3528 if (rgengc_remembered(objspace, (
VALUE)p))
rb_bug(
"page_sweep: %s - remembered.", obj_info((
VALUE)p));
3531 if (obj_free(objspace, (
VALUE)p)) {
3536 heap_page_add_freeobj(objspace, sweep_page, (
VALUE)p);
3537 gc_report(3, objspace,
"page_sweep: %s is added to freelist\n", obj_info((
VALUE)p));
3558 gc_setup_mark_bits(sweep_page);
3560 #if GC_PROFILE_MORE_DETAIL 3563 record->removing_objects +=
final_slots + freed_slots;
3564 record->empty_objects += empty_slots;
3567 if (0) fprintf(stderr,
"gc_page_sweep(%d): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3572 sweep_page->
free_slots = freed_slots + empty_slots;
3580 gc_finalize_deferred_register(objspace);
3584 gc_report(2, objspace,
"page_sweep: end.\n");
3586 return freed_slots + empty_slots;
3595 heap_set_increment(objspace, 1);
3596 if (!heap_increment(objspace, heap)) {
3603 gc_mode_name(
enum gc_mode mode)
3609 default:
rb_bug(
"gc_mode_name: unknown mode: %d", (
int)mode);
3616 #if RGENGC_CHECK_MODE 3618 switch (prev_mode) {
3624 if (0) fprintf(stderr,
"gc_mode_transition: %s->%s\n", gc_mode_name(
gc_mode(objspace)), gc_mode_name(mode));
3633 #if GC_ENABLE_INCREMENTAL_MARK 3648 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4 3649 __attribute__((noinline))
3655 gc_sweep_start_heap(objspace,
heap_eden);
3661 gc_report(1, objspace,
"gc_sweep_finish\n");
3663 gc_prof_set_heap_info(objspace);
3664 heap_pages_free_unused_pages(objspace);
3667 if (heap_allocatable_pages < heap_tomb->total_pages) {
3668 heap_allocatable_pages_set(objspace,
heap_tomb->total_pages);
3674 #if RGENGC_CHECK_MODE >= 2 3675 gc_verify_internal_consistency(
Qnil);
3683 int unlink_limit = 3;
3684 #if GC_ENABLE_INCREMENTAL_MARK 3687 gc_report(2, objspace,
"gc_sweep_step (need_pool: %d)\n", need_pool);
3689 gc_report(2, objspace,
"gc_sweep_step\n");
3694 #if GC_ENABLE_LAZY_SWEEP 3695 gc_prof_sweep_timer_start(objspace);
3698 while (sweep_page) {
3700 int free_slots = gc_page_sweep(objspace, heap, sweep_page);
3708 heap_unlink_page(objspace, heap, sweep_page);
3709 heap_add_page(objspace,
heap_tomb, sweep_page);
3711 else if (free_slots > 0) {
3712 #if GC_ENABLE_INCREMENTAL_MARK 3714 if (heap_add_poolpage(objspace, heap, sweep_page)) {
3719 heap_add_freepage(objspace, heap, sweep_page);
3723 heap_add_freepage(objspace, heap, sweep_page);
3731 sweep_page = next_sweep_page;
3735 gc_sweep_finish(objspace);
3738 #if GC_ENABLE_LAZY_SWEEP 3739 gc_prof_sweep_timer_stop(objspace);
3751 gc_sweep_step(objspace, heap);
3755 #if GC_ENABLE_LAZY_SWEEP 3761 gc_enter(objspace,
"sweep_continue");
3764 gc_report(3, objspace,
"gc_sweep_continue: success heap_increment().\n");
3767 gc_sweep_step(objspace, heap);
3768 gc_exit(objspace,
"sweep_continue");
3777 gc_report(1, objspace,
"gc_sweep: immediate: %d\n", immediate_sweep);
3779 if (immediate_sweep) {
3780 #if !GC_ENABLE_LAZY_SWEEP 3781 gc_prof_sweep_timer_start(objspace);
3783 gc_sweep_start(objspace);
3784 gc_sweep_rest(objspace);
3785 #if !GC_ENABLE_LAZY_SWEEP 3786 gc_prof_sweep_timer_stop(objspace);
3791 gc_sweep_start(objspace);
3800 gc_heap_prepare_minimum_pages(objspace,
heap_eden);
3806 stack_chunk_alloc(
void)
3830 size += stack->
limit;
3831 chunk = chunk->
next;
3840 stack->
cache = chunk;
3850 chunk = stack->
cache;
3866 next = stack->
cache;
3873 next = stack_chunk_alloc();
3887 add_stack_chunk_cache(stack, stack->
chunk);
3898 while (chunk !=
NULL) {
3909 push_mark_stack_chunk(stack);
3917 if (is_mark_stack_empty(stack)) {
3920 if (stack->
index == 1) {
3922 pop_mark_stack_chunk(stack);
3930 #if GC_ENABLE_INCREMENTAL_MARK 3935 for (i=0; i<limit; i++) {
3936 if (chunk->
data[i] == obj) {
3948 int limit = stack->
index;
3951 if (invalidate_mark_stack_chunk(chunk, limit, obj))
return;
3952 chunk = chunk->
next;
3953 limit = stack->
limit;
3955 rb_bug(
"invalid_mark_stack: unreachable");
3968 for (i=0; i < 4; i++) {
3969 add_stack_chunk_cache(stack, stack_chunk_alloc());
3977 #define SET_STACK_END (SET_MACHINE_STACK_END(&ec->machine.stack_end), ec->machine.register_stack_end = rb_ia64_bsp()) 3979 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end) 3982 #define STACK_START (ec->machine.stack_start) 3983 #define STACK_END (ec->machine.stack_end) 3984 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE)) 3986 #if STACK_GROW_DIRECTION < 0 3987 # define STACK_LENGTH (size_t)(STACK_START - STACK_END) 3988 #elif STACK_GROW_DIRECTION > 0 3989 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1) 3991 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \ 3992 : (size_t)(STACK_END - STACK_START + 1)) 3994 #if !STACK_GROW_DIRECTION 4002 if (end > addr)
return ruby_stack_grow_direction = 1;
4003 return ruby_stack_grow_direction = -1;
4016 #define PREVENT_STACK_OVERFLOW 1 4017 #ifndef PREVENT_STACK_OVERFLOW 4018 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)) 4019 # define PREVENT_STACK_OVERFLOW 1 4021 # define PREVENT_STACK_OVERFLOW 0 4024 #if PREVENT_STACK_OVERFLOW 4034 ret = (
VALUE*)rb_ia64_bsp() - ec->
machine.register_stack_start >
4035 ec->
machine.register_stack_maxsize/
sizeof(
VALUE) - water_mark;
4041 #define stack_check(th, water_mark) FALSE 4044 #define STACKFRAME_FOR_CALL_CFUNC 838 4060 mark_locations_array(
rb_objspace_t *objspace,
register const VALUE *x,
register long n)
4065 gc_mark_maybe(objspace, v);
4075 if (end <= start)
return;
4077 mark_locations_array(objspace, start, n);
4083 gc_mark_locations(&rb_objspace, start, end);
4091 for (i=0; i<n; i++) {
4092 gc_mark(objspace, values[i]);
4100 gc_mark_values(objspace, n, values);
4107 gc_mark(objspace, (
VALUE)value);
4122 gc_mark(objspace, (
VALUE)key);
4136 mark_set(&rb_objspace, tbl);
4144 gc_mark(objspace, (
VALUE)key);
4145 gc_mark(objspace, (
VALUE)value);
4159 mark_hash(&rb_objspace, tbl);
4167 gc_mark(objspace, me->
owner);
4171 switch (def->type) {
4173 if (def->body.iseq.iseqptr) gc_mark(objspace, (
VALUE)def->body.iseq.iseqptr);
4174 gc_mark(objspace, (
VALUE)def->body.iseq.cref);
4178 gc_mark(objspace, def->body.attr.location);
4181 gc_mark(objspace, def->body.proc);
4184 gc_mark(objspace, (
VALUE)def->body.alias.original_me);
4187 gc_mark(objspace, (
VALUE)def->body.refined.orig_me);
4188 gc_mark(objspace, (
VALUE)def->body.refined.owner);
4202 mark_method_entry_i(
VALUE me,
void *data)
4206 gc_mark(objspace, me);
4219 mark_const_entry_i(
VALUE value,
void *data)
4224 gc_mark(objspace, ce->
value);
4225 gc_mark(objspace, ce->
file);
4236 #if STACK_GROW_DIRECTION < 0 4237 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START) 4238 #elif STACK_GROW_DIRECTION > 0 4239 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix)) 4241 #define GET_STACK_BOUNDS(start, end, appendix) \ 4242 ((STACK_END < STACK_START) ? \ 4243 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix))) 4247 const VALUE *stack_start,
const VALUE *stack_end);
4255 } save_regs_gc_mark;
4256 VALUE *stack_start, *stack_end;
4268 mark_locations_array(objspace, save_regs_gc_mark.v,
numberof(save_regs_gc_mark.v));
4270 mark_stack_locations(objspace, ec, stack_start, stack_end);
4277 VALUE *stack_start, *stack_end;
4280 mark_stack_locations(objspace, ec, stack_start, stack_end);
4285 const VALUE *stack_start,
const VALUE *stack_end)
4288 gc_mark_locations(objspace, stack_start, stack_end);
4290 gc_mark_locations(objspace,
4291 ec->
machine.register_stack_start,
4292 ec->
machine.register_stack_end);
4294 #if defined(__mc68000__) 4295 gc_mark_locations(objspace,
4296 (
VALUE*)((
char*)stack_start + 2),
4297 (
VALUE*)((
char*)stack_end - 2));
4304 mark_tbl(&rb_objspace, tbl);
4311 if (is_pointer_to_heap(objspace, (
void *)obj)) {
4314 gc_mark_ptr(objspace, obj);
4322 gc_mark_maybe(&rb_objspace, obj);
4328 if (RVALUE_MARKED(obj))
return 0;
4345 #if RGENGC_PROFILE > 0 4346 objspace->
profile.total_remembered_shady_object_count++;
4347 #if RGENGC_PROFILE >= 2 4366 if (RVALUE_WB_UNPROTECTED(obj)) {
4367 if (gc_remember_unprotected(objspace, obj)) {
4368 gc_report(2, objspace,
"relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
4372 if (!RVALUE_OLD_P(obj)) {
4373 if (RVALUE_MARKED(obj)) {
4375 gc_report(2, objspace,
"relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
4376 RVALUE_AGE_SET_OLD(objspace, obj);
4378 if (!RVALUE_MARKING(obj)) {
4379 gc_grey(objspace, obj);
4383 rgengc_remember(objspace, obj);
4387 gc_report(2, objspace,
"relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
4388 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
4401 #if RGENGC_CHECK_MODE 4402 if (RVALUE_MARKED(obj) ==
FALSE)
rb_bug(
"gc_grey: %s is not marked.", obj_info(obj));
4403 if (RVALUE_MARKING(obj) ==
TRUE)
rb_bug(
"gc_grey: %s is marking/remembered.", obj_info(obj));
4406 #if GC_ENABLE_INCREMENTAL_MARK 4422 check_rvalue_consistency(obj);
4425 if (!RVALUE_OLD_P(obj)) {
4426 gc_report(3, objspace,
"gc_aging: YOUNG: %s\n", obj_info(obj));
4427 RVALUE_AGE_INC(objspace, obj);
4431 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
4434 check_rvalue_consistency(obj);
4446 rgengc_check_relation(objspace, obj);
4447 if (!gc_mark_set(objspace, obj))
return;
4448 gc_aging(objspace, obj);
4449 gc_grey(objspace, obj);
4459 if (!is_markable_object(objspace, obj))
return;
4460 gc_mark_ptr(objspace, obj);
4466 gc_mark(&rb_objspace, ptr);
4476 return RVALUE_MARKED(obj) ?
TRUE :
FALSE;
4483 if (RVALUE_OLD_P(obj)) {
4500 gc_mark_values(objspace, (
long)env->
env_size, env->
env);
4507 gc_mark(objspace,
RANY(obj)->as.imemo.cref.klass);
4508 gc_mark(objspace, (
VALUE)
RANY(obj)->as.imemo.cref.next);
4509 gc_mark(objspace,
RANY(obj)->as.imemo.cref.refinements);
4512 gc_mark(objspace,
RANY(obj)->as.imemo.svar.cref_or_me);
4513 gc_mark(objspace,
RANY(obj)->as.imemo.svar.lastline);
4514 gc_mark(objspace,
RANY(obj)->as.imemo.svar.backref);
4515 gc_mark(objspace,
RANY(obj)->as.imemo.svar.others);
4518 gc_mark(objspace,
RANY(obj)->as.imemo.throw_data.throw_obj);
4521 gc_mark_maybe(objspace, (
VALUE)
RANY(obj)->as.imemo.ifunc.data);
4524 gc_mark(objspace,
RANY(obj)->as.imemo.memo.v1);
4525 gc_mark(objspace,
RANY(obj)->as.imemo.memo.v2);
4526 gc_mark_maybe(objspace,
RANY(obj)->as.imemo.memo.u3.value);
4529 mark_method_entry(objspace, &
RANY(obj)->as.imemo.ment);
4542 #if VM_CHECK_MODE > 0 4553 gc_mark_set_parent(objspace, obj);
4562 rb_bug(
"rb_gc_mark() called for broken object");
4567 if (obj) gc_mark(objspace, obj);
4571 gc_mark_imemo(objspace, obj);
4603 for (i=0; i <
len; i++) {
4604 gc_mark(objspace, *ptr++);
4627 if (mark_func) (*mark_func)(ptr);
4636 for (i = 0; i <
len; i++) {
4637 gc_mark(objspace, *ptr++);
4685 gc_mark(objspace, *ptr++);
4696 rb_bug(
"rb_gc_mark(): unknown data type 0x%x(%p) %s",
4698 is_pointer_to_heap(objspace, any) ?
"corrupted object" :
"non object");
4711 #if GC_ENABLE_INCREMENTAL_MARK 4712 size_t marked_slots_at_the_beginning = objspace->
marked_slots;
4713 size_t popped_count = 0;
4716 while (pop_mark_stack(mstack, &obj)) {
4717 if (obj ==
Qundef)
continue;
4720 rb_bug(
"gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
4722 gc_mark_children(objspace, obj);
4724 #if GC_ENABLE_INCREMENTAL_MARK 4727 rb_bug(
"gc_mark_stacked_objects: incremental, but marking bit is 0");
4732 if (popped_count + (objspace->
marked_slots - marked_slots_at_the_beginning) > count) {
4744 if (is_mark_stack_empty(mstack)) {
4745 shrink_stack_chunk_cache(mstack);
4754 gc_mark_stacked_objects_incremental(
rb_objspace_t *objspace,
size_t count)
4756 return gc_mark_stacked_objects(objspace,
TRUE, count);
4762 return gc_mark_stacked_objects(objspace,
FALSE, 0);
4765 #if PRINT_ROOT_TICKS 4766 #define MAX_TICKS 0x100 4767 static tick_t mark_ticks[MAX_TICKS];
4768 static const char *mark_ticks_categories[MAX_TICKS];
4771 show_mark_ticks(
void)
4774 fprintf(stderr,
"mark ticks result:\n");
4775 for (i=0; i<MAX_TICKS; i++) {
4776 const char *category = mark_ticks_categories[i];
4778 fprintf(stderr,
"%s\t%8lu\n", category, (
unsigned long)mark_ticks[i]);
4789 gc_mark_roots(
rb_objspace_t *objspace,
const char **categoryp)
4795 #if PRINT_ROOT_TICKS 4796 tick_t start_tick = tick();
4798 const char *prev_category = 0;
4800 if (mark_ticks_categories[0] == 0) {
4801 atexit(show_mark_ticks);
4805 if (categoryp) *categoryp =
"xxx";
4811 #if PRINT_ROOT_TICKS 4812 #define MARK_CHECKPOINT_PRINT_TICK(category) do { \ 4813 if (prev_category) { \ 4814 tick_t t = tick(); \ 4815 mark_ticks[tick_count] = t - start_tick; \ 4816 mark_ticks_categories[tick_count] = prev_category; \ 4819 prev_category = category; \ 4820 start_tick = tick(); \ 4823 #define MARK_CHECKPOINT_PRINT_TICK(category) 4826 #define MARK_CHECKPOINT(category) do { \ 4827 if (categoryp) *categoryp = category; \ 4828 MARK_CHECKPOINT_PRINT_TICK(category); \ 4840 mark_current_machine_context(objspace, &th->
ec);
4860 #undef MARK_CHECKPOINT 4863 #if RGENGC_CHECK_MODE >= 4 4865 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01) 4866 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01) 4867 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1)) 4875 static struct reflist *
4876 reflist_create(
VALUE obj)
4878 struct reflist *refs =
xmalloc(
sizeof(
struct reflist));
4881 refs->list[0] = obj;
4887 reflist_destruct(
struct reflist *refs)
4894 reflist_add(
struct reflist *refs,
VALUE obj)
4896 if (refs->pos == refs->size) {
4901 refs->list[refs->pos++] = obj;
4905 reflist_dump(
struct reflist *refs)
4908 for (i=0; i<refs->pos; i++) {
4909 VALUE obj = refs->list[i];
4910 if (IS_ROOTSIG(obj)) {
4911 fprintf(stderr,
"<root@%s>", GET_ROOTSIG(obj));
4914 fprintf(stderr,
"<%s>", obj_info(obj));
4916 if (i+1 < refs->pos) fprintf(stderr,
", ");
4921 reflist_refered_from_machine_context(
struct reflist *refs)
4924 for (i=0; i<refs->pos; i++) {
4925 VALUE obj = refs->list[i];
4926 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj),
"machine_context") == 0)
return 1;
4941 const char *category;
4947 allrefs_add(
struct allrefs *data,
VALUE obj)
4949 struct reflist *refs;
4952 reflist_add(refs, data->root_obj);
4956 refs = reflist_create(data->root_obj);
4963 allrefs_i(
VALUE obj,
void *ptr)
4965 struct allrefs *data = (
struct allrefs *)ptr;
4967 if (allrefs_add(data, obj)) {
4968 push_mark_stack(&data->mark_stack, obj);
4973 allrefs_roots_i(
VALUE obj,
void *ptr)
4975 struct allrefs *data = (
struct allrefs *)ptr;
4977 data->root_obj = MAKE_ROOTSIG(data->category);
4979 if (allrefs_add(data, obj)) {
4980 push_mark_stack(&data->mark_stack, obj);
4987 struct allrefs data;
4988 struct mark_func_data_struct mfd;
4993 data.objspace = objspace;
4995 init_mark_stack(&data.mark_stack);
4997 mfd.mark_func = allrefs_roots_i;
5003 gc_mark_roots(objspace, &data.category);
5007 while (pop_mark_stack(&data.mark_stack, &obj)) {
5010 free_stack_chunks(&data.mark_stack);
5013 return data.references;
5019 struct reflist *refs = (
struct reflist *)value;
5020 reflist_destruct(refs);
5025 objspace_allrefs_destruct(
struct st_table *refs)
5027 st_foreach(refs, objspace_allrefs_destruct_i, 0);
5031 #if RGENGC_CHECK_MODE >= 5 5036 struct reflist *refs = (
struct reflist *)v;
5037 fprintf(stderr,
"[allrefs_dump_i] %s <- ", obj_info(obj));
5039 fprintf(stderr,
"\n");
5046 fprintf(stderr,
"[all refs] (size: %d)\n", (
int)objspace->
rgengc.allrefs_table->num_entries);
5055 struct reflist *refs = (
struct reflist *)v;
5060 fprintf(stderr,
"gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
5061 fprintf(stderr,
"gc_check_after_marks_i: %p is referred from ", (
void *)obj);
5064 if (reflist_refered_from_machine_context(refs)) {
5065 fprintf(stderr,
" (marked from machine stack).\n");
5069 objspace->
rgengc.error_count++;
5070 fprintf(stderr,
"\n");
5077 gc_marks_check(
rb_objspace_t *objspace,
int (*checker_func)(
ANYARGS),
const char *checker_name)
5080 #if RGENGC_ESTIMATE_OLDMALLOC 5085 objspace->
rgengc.allrefs_table = objspace_allrefs(objspace);
5091 if (objspace->
rgengc.error_count > 0) {
5092 #if RGENGC_CHECK_MODE >= 5 5093 allrefs_dump(objspace);
5095 if (checker_name)
rb_bug(
"%s: GC has problem.", checker_name);
5098 objspace_allrefs_destruct(objspace->
rgengc.allrefs_table);
5099 objspace->
rgengc.allrefs_table = 0;
5103 #if RGENGC_ESTIMATE_OLDMALLOC 5124 check_generation_i(
const VALUE child,
void *ptr)
5131 if (!RVALUE_OLD_P(child)) {
5132 if (!RVALUE_REMEMBERED(parent) &&
5133 !RVALUE_REMEMBERED(child) &&
5134 !RVALUE_UNCOLLECTIBLE(child)) {
5135 fprintf(stderr,
"verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
5142 check_color_i(
const VALUE child,
void *ptr)
5147 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
5148 fprintf(stderr,
"verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
5149 obj_info(parent), obj_info(child));
5156 check_children_i(
const VALUE child,
void *ptr)
5158 check_rvalue_consistency(child);
5162 verify_internal_consistency_i(
void *page_start,
void *page_end,
size_t stride,
void *ptr)
5168 for (obj = (
VALUE)page_start; obj != (
VALUE)page_end; obj += stride) {
5169 if (is_live_object(objspace, obj)) {
5179 if (RVALUE_OLD_P(obj)) data->old_object_count++;
5180 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
5182 if (!
is_marking(objspace) && RVALUE_OLD_P(obj)) {
5189 if (RVALUE_BLACK_P(obj)) {
5213 unsigned int has_remembered_shady =
FALSE;
5214 unsigned int has_remembered_old =
FALSE;
5215 int rememberd_old_objects = 0;
5216 int free_objects = 0;
5217 int zombie_objects = 0;
5221 if (
RBASIC(val) == 0) free_objects++;
5224 has_remembered_shady =
TRUE;
5227 has_remembered_old =
TRUE;
5228 rememberd_old_objects++;
5238 fprintf(stderr,
"marking -> %s\n", obj_info(val));
5241 rb_bug(
"page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5242 page, rememberd_old_objects, obj ? obj_info(obj) :
"");
5246 rb_bug(
"page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5247 page, obj ? obj_info(obj) :
"");
5253 rb_bug(
"page %p's free_slots should be %d, but %d\n", page, (
int)page->
free_slots, free_objects);
5257 rb_bug(
"page %p's final_slots should be %d, but %d\n", page, (
int)page->
final_slots, zombie_objects);
5260 return rememberd_old_objects;
5269 int rememberd_old_objects = 0;
5273 rememberd_old_objects += gc_verify_heap_page(objspace, page,
Qfalse);
5278 return rememberd_old_objects;
5284 int rememberd_old_objects = 0;
5285 rememberd_old_objects = gc_verify_heap_pages_(objspace,
heap_eden->pages);
5286 rememberd_old_objects = gc_verify_heap_pages_(objspace,
heap_tomb->pages);
5287 return rememberd_old_objects;
5301 gc_verify_internal_consistency(
VALUE dummy)
5308 gc_report(5, objspace,
"gc_verify_internal_consistency: start\n");
5312 eo_args.
callback = verify_internal_consistency_i;
5313 eo_args.
data = (
void *)&data;
5314 objspace_each_objects((
VALUE)&eo_args);
5317 #if RGENGC_CHECK_MODE >= 5 5319 gc_marks_check(objspace,
NULL,
NULL);
5320 allrefs_dump(objspace);
5322 rb_bug(
"gc_verify_internal_consistency: found internal inconsistency.");
5326 gc_verify_heap_pages(objspace);
5332 fprintf(stderr,
"heap_pages_final_slots: %d, objspace->profile.total_freed_objects: %d\n",
5350 size_t list_count = 0;
5363 rb_bug(
"inconsistent finalizing object count:\n" 5366 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5373 gc_report(5, objspace,
"gc_verify_internal_consistency: OK\n");
5381 gc_verify_internal_consistency(
Qnil);
5390 gc_report(1, objspace,
"gc_marks_start: (%s)\n", full_mark ?
"full" :
"minor");
5395 #if GC_ENABLE_INCREMENTAL_MARK 5398 if (0) fprintf(stderr,
"objspace->marked_slots: %d, objspace->rincgc.pooled_page_num: %d, objspace->rincgc.step_slots: %d, \n",
5407 rgengc_mark_and_rememberset_clear(objspace,
heap_eden);
5414 rgengc_rememberset_mark(objspace,
heap_eden);
5418 gc_mark_roots(objspace,
NULL);
5420 gc_report(1, objspace,
"gc_marks_start: (%s) end, stack in %d\n", full_mark ?
"full" :
"minor", (
int)mark_stack_size(&objspace->
mark_stack));
5423 #if GC_ENABLE_INCREMENTAL_MARK 5437 bits_t bits = mark_bits[j] & wbun_bits[j];
5444 gc_report(2, objspace,
"gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((
VALUE)p));
5447 gc_mark_children(objspace, (
VALUE)p);
5458 gc_mark_stacked_objects_all(objspace);
5462 heap_move_pooled_pages_to_free_pages(
rb_heap_t *heap)
5479 #if GC_ENABLE_INCREMENTAL_MARK 5483 heap_move_pooled_pages_to_free_pages(
heap_eden);
5484 gc_report(1, objspace,
"gc_marks_finish: pooled pages are exists. retry.\n");
5489 rb_bug(
"gc_marks_finish: mark stack is not empty (%d).", (
int)mark_stack_size(&objspace->
mark_stack));
5492 gc_mark_roots(objspace, 0);
5495 gc_report(1, objspace,
"gc_marks_finish: not empty (%d). retry.\n", (
int)mark_stack_size(&objspace->
mark_stack));
5499 #if RGENGC_CHECK_MODE >= 2 5500 if (gc_verify_heap_pages(objspace) != 0) {
5501 rb_bug(
"gc_marks_finish (incremental): there are remembered old objects.");
5507 gc_marks_wb_unprotected_objects(objspace);
5511 #if RGENGC_CHECK_MODE >= 2 5512 gc_verify_internal_consistency(
Qnil);
5524 #if RGENGC_CHECK_MODE >= 4 5525 gc_marks_check(objspace, gc_check_after_marks_i,
"after_marks");
5532 size_t sweep_slots = total_slots - objspace->
marked_slots;
5542 if (sweep_slots > max_free_slots) {
5553 if (sweep_slots < min_free_slots) {
5554 if (!full_marking) {
5556 full_marking =
TRUE;
5561 gc_report(1, objspace,
"gc_marks_finish: next is full GC!!)\n");
5567 gc_report(1, objspace,
"gc_marks_finish: heap_set_increment!!\n");
5568 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slots, total_slots));
5569 heap_increment(objspace, heap);
5590 gc_report(1, objspace,
"gc_marks_finish (marks %d objects, old %d objects, total %d slots, sweep %d slots, increment: %d, next GC: %s)\n",
5594 if (sweep_slots < min_free_slots) {
5595 gc_report(1, objspace,
"gc_marks_finish: heap_set_increment!!\n");
5596 heap_set_increment(objspace, heap_extend_pages(objspace, sweep_slot, total_slot));
5597 heap_increment(objspace, heap);
5607 #if GC_ENABLE_INCREMENTAL_MARK 5613 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
5614 if (gc_marks_finish(objspace)) {
5619 if (0) fprintf(stderr,
"objspace->marked_slots: %d\n", (
int)objspace->
marked_slots);
5626 gc_report(1, objspace,
"gc_marks_rest\n");
5628 #if GC_ENABLE_INCREMENTAL_MARK 5634 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) ==
FALSE);
5635 }
while (gc_marks_finish(objspace) ==
FALSE);
5638 gc_mark_stacked_objects_all(objspace);
5639 gc_marks_finish(objspace);
5646 #if GC_ENABLE_INCREMENTAL_MARK 5655 gc_enter(objspace,
"marks_continue");
5661 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
5664 from =
"pooled-pages";
5666 else if (heap_increment(objspace, heap)) {
5668 from =
"incremented-pages";
5672 gc_report(2, objspace,
"gc_marks_continue: provide %d slots from %s.\n", slots, from);
5676 gc_report(2, objspace,
"gc_marks_continue: no more pooled pages (stack depth: %d).\n", (
int)mark_stack_size(&objspace->
mark_stack));
5677 gc_marks_rest(objspace);
5682 gc_exit(objspace,
"marks_continue");
5689 gc_prof_mark_timer_start(objspace);
5696 gc_marks_start(objspace, full_mark);
5698 gc_marks_rest(objspace);
5701 #if RGENGC_PROFILE > 0 5709 gc_marks_start(objspace,
TRUE);
5710 gc_marks_rest(objspace);
5714 gc_prof_mark_timer_stop(objspace);
5726 const char *status =
" ";
5742 va_start(args, fmt);
5746 fprintf(out,
"%s|", status);
5758 return RVALUE_REMEMBERED(obj);
5785 gc_report(6, objspace,
"rgengc_remember: %s %s\n", obj_info(obj),
5786 rgengc_remembersetbits_get(objspace, obj) ?
"was already remembered" :
"is remembered now");
5788 check_rvalue_consistency(obj);
5791 if (RVALUE_WB_UNPROTECTED(obj))
rb_bug(
"rgengc_remember: %s is not wb protected.", obj_info(obj));
5794 #if RGENGC_PROFILE > 0 5795 if (!rgengc_remembered(objspace, obj)) {
5796 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
5797 objspace->
profile.total_remembered_normal_object_count++;
5798 #if RGENGC_PROFILE >= 2 5805 return rgengc_remembersetbits_set(objspace, obj);
5811 int result = rgengc_remembersetbits_get(objspace, obj);
5812 check_rvalue_consistency(obj);
5813 gc_report(6, objspace,
"rgengc_remembered: %s\n", obj_info(obj));
5817 #ifndef PROFILE_REMEMBERSET_MARK 5818 #define PROFILE_REMEMBERSET_MARK 0 5826 #if PROFILE_REMEMBERSET_MARK 5827 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5829 gc_report(1, objspace,
"rgengc_rememberset_mark: start\n");
5839 #if PROFILE_REMEMBERSET_MARK 5845 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
5846 marking_bits[j] = 0;
5859 gc_report(2, objspace,
"rgengc_rememberset_mark: mark %s\n", obj_info(obj));
5861 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
5863 gc_mark_children(objspace, obj);
5871 #if PROFILE_REMEMBERSET_MARK 5880 #if PROFILE_REMEMBERSET_MARK 5881 fprintf(stderr,
"%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
5883 gc_report(1, objspace,
"rgengc_rememberset_mark: finished\n");
5909 if (!RVALUE_OLD_P(a))
rb_bug(
"gc_writebarrier_generational: %s is not an old object.", obj_info(a));
5910 if ( RVALUE_OLD_P(b))
rb_bug(
"gc_writebarrier_generational: %s is an old object.", obj_info(b));
5911 if (
is_incremental_marking(objspace))
rb_bug(
"gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
5916 if (!rgengc_remembered(objspace, a)) {
5917 rgengc_remember(objspace, a);
5918 gc_report(1, objspace,
"gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
5923 if (RVALUE_WB_UNPROTECTED(b)) {
5924 gc_remember_unprotected(objspace, b);
5927 RVALUE_AGE_SET_OLD(objspace, b);
5928 rgengc_remember(objspace, b);
5931 gc_report(1, objspace,
"gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
5934 check_rvalue_consistency(a);
5935 check_rvalue_consistency(b);
5938 #if GC_ENABLE_INCREMENTAL_MARK 5942 gc_mark_set_parent(objspace, parent);
5943 rgengc_check_relation(objspace, obj);
5944 if (gc_mark_set(objspace, obj) ==
FALSE)
return;
5945 gc_aging(objspace, obj);
5946 gc_grey(objspace, obj);
5954 gc_report(2, objspace,
"gc_writebarrier_incremental: [LG] %s -> %s\n", obj_info(a), obj_info(b));
5956 if (RVALUE_BLACK_P(a)) {
5957 if (RVALUE_WHITE_P(b)) {
5958 if (!RVALUE_WB_UNPROTECTED(a)) {
5959 gc_report(2, objspace,
"gc_writebarrier_incremental: [IN] %s -> %s\n", obj_info(a), obj_info(b));
5960 gc_mark_from(objspace, b, a);
5963 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
5964 if (!RVALUE_WB_UNPROTECTED(b)) {
5965 gc_report(1, objspace,
"gc_writebarrier_incremental: [GN] %s -> %s\n", obj_info(a), obj_info(b));
5966 RVALUE_AGE_SET_OLD(objspace, b);
5968 if (RVALUE_BLACK_P(b)) {
5969 gc_grey(objspace, b);
5973 gc_report(1, objspace,
"gc_writebarrier_incremental: [LL] %s -> %s\n", obj_info(a), obj_info(b));
5974 gc_remember_unprotected(objspace, b);
5980 #define gc_writebarrier_incremental(a, b, objspace) 5992 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
5996 gc_writebarrier_generational(a, b, objspace);
6000 gc_writebarrier_incremental(a, b, objspace);
    if (RVALUE_WB_UNPROTECTED(obj)) {
        return;
    }

    gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
              rgengc_remembered(objspace, obj) ? " (already remembered)" : "");

    if (RVALUE_OLD_P(obj)) {
        gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
        RVALUE_DEMOTE(objspace, obj);
        gc_mark_set(objspace, obj);
        gc_remember_unprotected(objspace, obj);

        objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
#endif
    }
    else {
        RVALUE_AGE_RESET(obj);
    }

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));

    if (RVALUE_BLACK_P(obj)) {
        gc_grey(objspace, obj);
    }

    if (RVALUE_OLD_P(obj)) {
        rgengc_remember(objspace, obj);
    }
static st_table *rgengc_unprotect_logging_table;

    fprintf(stderr, "%s\t%d\n", (char *)key, (int)val);

static void
rgengc_unprotect_logging_exit_func(void)
{
    st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
}

    if (rgengc_unprotect_logging_table == 0) {
        atexit(rgengc_unprotect_logging_exit_func);
    }

    if (RVALUE_WB_UNPROTECTED(obj) == 0) {
        snprintf(ptr, 0x100 - 1, "%s|%s:%d", obj_info(obj), filename, line);
    }
    if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
        if (!RVALUE_OLD_P(dest)) {
            RVALUE_AGE_RESET_RAW(dest);
        }
        else {
            RVALUE_DEMOTE(objspace, dest);
        }
    }

    check_rvalue_consistency(dest);

    return RVALUE_WB_UNPROTECTED(obj) ? Qfalse : Qtrue;

static ID ID_marked;
static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible;

#define I(s) ID_##s = rb_intern(#s);

    if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
    if (RVALUE_OLD_P(obj) && n<max)              flags[n++] = ID_old;
    if (RVALUE_UNCOLLECTIBLE(obj) && n<max)      flags[n++] = ID_uncollectible;
6180 int is_old = RVALUE_OLD_P(obj);
6182 gc_report(2, objspace,
"rb_gc_force_recycle: %s\n", obj_info(obj));
6185 if (RVALUE_MARKED(obj)) {
6192 #if GC_ENABLE_INCREMENTAL_MARK 6195 invalidate_mark_stack(&objspace->
mark_stack, obj);
6206 #if GC_ENABLE_INCREMENTAL_MARK 6222 #ifndef MARK_OBJECT_ARY_BUCKET_SIZE 6223 #define MARK_OBJECT_ARY_BUCKET_SIZE 1024 6258 if (tmp->
varptr == addr) {
6290 #define gc_stress_full_mark_after_malloc_p() \ 6291 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc))) 6297 if (!heap_increment(objspace, heap)) {
6298 heap_set_increment(objspace, 1);
6299 heap_increment(objspace, heap);
6319 gc_prof_set_malloc_info(objspace);
6351 #if RGENGC_ESTIMATE_OLDMALLOC 6363 if (0) fprintf(stderr,
"%d\t%d\t%u\t%u\t%d\n",
static int
garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_mark, int immediate_sweep, int reason)
{
#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time();
#endif

#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif

    return gc_start(objspace, full_mark, immediate_mark, immediate_sweep, reason);
}

static int
gc_start(rb_objspace_t *objspace, const int full_mark, const int immediate_mark, const unsigned int immediate_sweep, int reason)
{
    int do_full_mark = full_mark;

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(Qnil);
#endif

    gc_enter(objspace, "gc_start");

        do_full_mark = TRUE;
        do_full_mark = TRUE;
        do_full_mark = TRUE;

#if GC_ENABLE_INCREMENTAL_MARK

    gc_report(1, objspace, "gc_start(%d, %d, %d, reason: %d) => %d, %d, %d\n",
              full_mark, immediate_mark, immediate_sweep, reason,

    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace);

    gc_prof_timer_start(objspace);
    gc_marks(objspace, do_full_mark);
    gc_prof_timer_stop(objspace);

    gc_exit(objspace, "gc_start");
}

    if (marking || sweeping) {
        gc_enter(objspace, "gc_rest");
            gc_marks_rest(objspace);
            gc_sweep_rest(objspace);
        gc_exit(objspace, "gc_rest");
    }
6525 #if GC_ENABLE_INCREMENTAL_MARK 6543 static char buff[0x10];
6544 gc_current_status_fill(objspace, buff);
6548 #if PRINT_ENTER_EXIT_TICK 6550 static tick_t last_exit_tick;
6551 static tick_t enter_tick;
6552 static int enter_count = 0;
6553 static char last_gc_status[0x10];
6556 gc_record(
rb_objspace_t *objspace,
int direction,
const char *event)
6558 if (direction == 0) {
6560 enter_tick = tick();
6561 gc_current_status_fill(objspace, last_gc_status);
6564 tick_t exit_tick = tick();
6565 char current_gc_status[0x10];
6566 gc_current_status_fill(objspace, current_gc_status);
6569 fprintf(stderr,
"%"PRItick
"\t%"PRItick
"\t%s\t[%s->%s|%c]\n",
6570 enter_tick - last_exit_tick,
6571 exit_tick - enter_tick,
6573 last_gc_status, current_gc_status,
6575 last_exit_tick = exit_tick;
6578 fprintf(stderr,
"%"PRItick
"\t%"PRItick
"\t%s\t[%s->%s|%c]\n",
6580 exit_tick - enter_tick,
6582 last_gc_status, current_gc_status,
6589 gc_record(
rb_objspace_t *objspace,
int direction,
const char *event)
6602 gc_report(1, objspace,
"gc_entr: %s [%s]\n", event, gc_current_status(objspace));
6603 gc_record(objspace, 0, event);
6613 gc_record(objspace, 1, event);
6614 gc_report(1, objspace,
"gc_exit: %s [%s]\n", event, gc_current_status(objspace));
6619 gc_with_gvl(
void *ptr)
6626 garbage_collect_with_gvl(
rb_objspace_t *objspace,
int full_mark,
int immediate_mark,
int immediate_sweep,
int reason)
6630 return garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, reason);
6644 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
static VALUE
gc_start_internal(int argc, VALUE *argv, VALUE self)
{
    int full_mark = TRUE, immediate_mark = TRUE, immediate_sweep = TRUE;

    static ID keyword_ids[3];
    VALUE kwvals[3];

    if (!keyword_ids[0]) {
        keyword_ids[0] = rb_intern("full_mark");
        keyword_ids[1] = rb_intern("immediate_mark");
        keyword_ids[2] = rb_intern("immediate_sweep");
    }

    if (kwvals[0] != Qundef) full_mark = RTEST(kwvals[0]);
    if (kwvals[1] != Qundef) immediate_mark = RTEST(kwvals[1]);
    if (kwvals[2] != Qundef) immediate_sweep = RTEST(kwvals[2]);

    garbage_collect(objspace, full_mark, immediate_mark, immediate_sweep, GPR_FLAG_METHOD);
    gc_finalize_deferred(objspace);

    gc_finalize_deferred(objspace);
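/*
 * A sketch (not part of gc.c) of the rb_get_kwargs() pattern gc_start_internal()
 * uses above: three optional keywords, each left as Qundef when absent so
 * the compiled-in TRUE defaults survive.  It assumes the keyword hash has
 * already been split off with rb_scan_args(argc, argv, "0:", &opt); the
 * helper name is illustrative only.
 */
static void
parse_gc_start_keywords(VALUE opt, int *full_mark, int *immediate_mark, int *immediate_sweep)
{
    static ID keyword_ids[3];
    VALUE kwvals[3];

    if (!keyword_ids[0]) {
        keyword_ids[0] = rb_intern("full_mark");
        keyword_ids[1] = rb_intern("immediate_mark");
        keyword_ids[2] = rb_intern("immediate_sweep");
    }

    kwvals[0] = kwvals[1] = kwvals[2] = Qundef;
    if (!NIL_P(opt)) rb_get_kwargs(opt, keyword_ids, 0, 3, kwvals);

    if (kwvals[0] != Qundef) *full_mark       = RTEST(kwvals[0]);
    if (kwvals[1] != Qundef) *immediate_mark  = RTEST(kwvals[1]);
    if (kwvals[2] != Qundef) *immediate_sweep = RTEST(kwvals[2]);
}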
6748 #if RGENGC_PROFILE >= 2 6750 static const char *type_name(
int type,
VALUE obj);
6753 gc_count_add_each_types(
VALUE hash,
const char *
name,
const size_t *types)
6757 for (i=0; i<
T_MASK; i++) {
6758 const char *type = type_name(i, 0);
6782 gc_count(
VALUE self)
6788 gc_info_decode(
rb_objspace_t *objspace,
const VALUE hash_or_key,
const int orig_flags)
6790 static VALUE sym_major_by =
Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
6791 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
6792 #if RGENGC_ESTIMATE_OLDMALLOC 6793 static VALUE sym_oldmalloc;
6795 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
6796 static VALUE sym_none, sym_marking, sym_sweeping;
6811 if (sym_major_by ==
Qnil) {
6812 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s)) 6824 #if RGENGC_ESTIMATE_OLDMALLOC 6838 #define SET(name, attr) \ 6839 if (key == sym_##name) \ 6841 else if (hash != Qnil) \ 6842 rb_hash_aset(hash, sym_##name, (attr)); 6849 #if RGENGC_ESTIMATE_OLDMALLOC 6853 SET(major_by, major_by);
6867 if (orig_flags == 0) {
6884 return gc_info_decode(objspace, key, 0);
6897 gc_latest_gc_info(
int argc,
VALUE *argv,
VALUE self)
6912 return gc_info_decode(objspace, arg, 0);
6940 #if RGENGC_ESTIMATE_OLDMALLOC 6945 gc_stat_sym_total_generated_normal_object_count,
6946 gc_stat_sym_total_generated_shady_object_count,
6947 gc_stat_sym_total_shade_operation_count,
6948 gc_stat_sym_total_promoted_count,
6949 gc_stat_sym_total_remembered_normal_object_count,
6950 gc_stat_sym_total_remembered_shady_object_count,
6976 #if RGENGC_ESTIMATE_OLDMALLOC 6985 static VALUE gc_stat_compat_table;
6988 setup_gc_stat_symbols(
void)
6990 if (gc_stat_symbols[0] == 0) {
6991 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s)) 6994 S(heap_sorted_length);
6996 S(heap_available_slots);
6999 S(heap_final_slots);
7000 S(heap_marked_slots);
7003 S(total_allocated_pages);
7004 S(total_freed_pages);
7005 S(total_allocated_objects);
7006 S(total_freed_objects);
7007 S(malloc_increase_bytes);
7008 S(malloc_increase_bytes_limit);
7012 S(remembered_wb_unprotected_objects);
7013 S(remembered_wb_unprotected_objects_limit);
7015 S(old_objects_limit);
7016 #if RGENGC_ESTIMATE_OLDMALLOC 7017 S(oldmalloc_increase_bytes);
7018 S(oldmalloc_increase_bytes_limit);
7021 S(total_generated_normal_object_count);
7022 S(total_generated_shady_object_count);
7023 S(total_shade_operation_count);
7024 S(total_promoted_count);
7025 S(total_remembered_normal_object_count);
7026 S(total_remembered_shady_object_count);
7030 #define S(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] = ID2SYM(rb_intern_const(#s)) 7031 S(gc_stat_heap_used);
7032 S(heap_eden_page_length);
7033 S(heap_tomb_page_length);
7041 S(remembered_shady_object);
7042 S(remembered_shady_object_limit);
7044 S(old_object_limit);
7046 S(total_allocated_object);
7047 S(total_freed_object);
7050 #if RGENGC_ESTIMATE_OLDMALLOC 7051 S(oldmalloc_increase);
7062 #define OLD_SYM(s) gc_stat_compat_symbols[gc_stat_compat_sym_##s] 7063 #define NEW_SYM(s) gc_stat_symbols[gc_stat_sym_##s] 7082 #if RGENGC_ESTIMATE_OLDMALLOC 7094 compat_key(
VALUE key)
7098 if (!
NIL_P(new_key)) {
7099 static int warned = 0;
7101 rb_warn(
"GC.stat keys were changed from Ruby 2.1. " 7103 "Please check <https://bugs.ruby-lang.org/issues/9924> for more information.",
7113 default_proc_for_compat_func(
VALUE hash,
VALUE dmy,
int argc,
VALUE *argv)
7121 if ((new_key = compat_key(key)) !=
Qnil) {
7129 gc_stat_internal(
VALUE hash_or_sym)
7134 setup_gc_stat_symbols();
7140 static VALUE default_proc_for_compat = 0;
7141 if (default_proc_for_compat == 0) {
7142 default_proc_for_compat =
rb_proc_new(default_proc_for_compat_func,
Qnil);
7155 #define SET(name, attr) \ 7156 if (key == gc_stat_symbols[gc_stat_sym_##name]) \ 7158 else if (hash != Qnil) \ 7159 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr)); 7168 SET(heap_available_slots, objspace_available_slots(objspace));
7169 SET(heap_live_slots, objspace_live_slots(objspace));
7170 SET(heap_free_slots, objspace_free_slots(objspace));
7188 #if RGENGC_ESTIMATE_OLDMALLOC 7194 SET(total_generated_normal_object_count, objspace->
profile.total_generated_normal_object_count);
7195 SET(total_generated_shady_object_count, objspace->
profile.total_generated_shady_object_count);
7196 SET(total_shade_operation_count, objspace->
profile.total_shade_operation_count);
7197 SET(total_promoted_count, objspace->
profile.total_promoted_count);
7198 SET(total_remembered_normal_object_count, objspace->
profile.total_remembered_normal_object_count);
7199 SET(total_remembered_shady_object_count, objspace->
profile.total_remembered_shady_object_count);
7206 if ((new_key = compat_key(key)) !=
Qnil) {
7213 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2 7215 gc_count_add_each_types(hash,
"generated_normal_object_count_types", objspace->
profile.generated_normal_object_count_types);
7216 gc_count_add_each_types(hash,
"generated_shady_object_count_types", objspace->
profile.generated_shady_object_count_types);
7217 gc_count_add_each_types(hash,
"shade_operation_count_types", objspace->
profile.shade_operation_count_types);
7218 gc_count_add_each_types(hash,
"promoted_types", objspace->
profile.promoted_types);
7219 gc_count_add_each_types(hash,
"remembered_normal_object_count_types", objspace->
profile.remembered_normal_object_count_types);
7220 gc_count_add_each_types(hash,
"remembered_shady_object_count_types", objspace->
profile.remembered_shady_object_count_types);
    size_t value = gc_stat_internal(arg);

    gc_stat_internal(arg);

    size_t value = gc_stat_internal(key);

    gc_stat_internal(key);
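/*
 * A usage sketch (not part of gc.c) of the C-level entry point built on
 * gc_stat_internal() above: passing a Symbol to rb_gc_stat() returns a
 * single counter, while passing a Hash fills in every known key.  The key
 * names shown here are the ones registered by setup_gc_stat_symbols();
 * the helper function itself is illustrative only.
 */
static void
log_heap_counters(void)
{
    size_t count = rb_gc_stat(ID2SYM(rb_intern("count")));
    size_t live  = rb_gc_stat(ID2SYM(rb_intern("heap_live_slots")));

    fprintf(stderr, "GC count: %"PRIuSIZE", live slots: %"PRIuSIZE"\n", count, live);
}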
7315 gc_stress_get(
VALUE self)
7349 gc_stress_set(objspace, flag);
static int
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
{
    char *ptr = getenv(name);
    ssize_t val;

    if (ptr != NULL && *ptr) {
        size_t unit = 0;
        char *end;
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif
            unit = 1024*1024*1024;
        while (*end && isspace((unsigned char)*end)) end++;
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
            }
        if (val > 0 && (size_t)val > lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            }
            *default_value = (size_t)val;
            return 1;
        }
        else {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);
            }
            return 0;
        }
    }
    return 0;
}
static int
get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
{
    char *ptr = getenv(name);
    double val;

    if (ptr != NULL && *ptr) {
        char *end;
        val = strtod(ptr, &end);

        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
            return 0;
        }

        if (accept_zero && val == 0.0) {
            goto accept;
        }
        else if (val <= lower_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);
            }
        }
        else if (upper_bound != 0.0 && val > upper_bound) {
            if (RTEST(ruby_verbose)) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);
            }
        }
        else {
          accept:
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
            *default_value = val;
            return 1;
        }
    }
    return 0;
}
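/*
 * A simplified sketch (not part of gc.c) of the pattern the two
 * get_envparam_* readers above implement: look up an environment variable,
 * reject garbage and out-of-range values verbosely, and only then overwrite
 * the compiled-in default.  The real code above also honors an upper bound,
 * size-unit suffixes, and an accept_zero flag; the helper name here is
 * illustrative only.
 */
static void
env_override_double(const char *name, double *default_value, double lower_bound)
{
    const char *ptr = getenv(name);
    char *end;
    double val;

    if (ptr == NULL || !*ptr) return;     /* unset or empty: keep the default */

    val = strtod(ptr, &end);
    if (end == ptr || *end) {
        fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
    }
    else if (val <= lower_bound) {
        fprintf(stderr, "%s=%f is ignored because it must be greater than %f.\n",
                name, val, lower_bound);
    }
    else {
        *default_value = val;             /* validated: adopt the new value */
    }
}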
7499 gc_set_initial_pages(
void)
7505 if (min_pages >
heap_eden->total_pages) {
7555 if (safe_level > 0)
return;
7558 if (get_envparam_size(
"RUBY_GC_HEAP_FREE_SLOTS", &gc_params.
heap_free_slots, 0)) {
7561 else if (get_envparam_size(
"RUBY_FREE_MIN", &gc_params.
heap_free_slots, 0)) {
7562 rb_warn(
"RUBY_FREE_MIN is obsolete. Use RUBY_GC_HEAP_FREE_SLOTS instead.");
7566 if (get_envparam_size(
"RUBY_GC_HEAP_INIT_SLOTS", &gc_params.
heap_init_slots, 0)) {
7567 gc_set_initial_pages();
7569 else if (get_envparam_size(
"RUBY_HEAP_MIN_SLOTS", &gc_params.
heap_init_slots, 0)) {
7570 rb_warn(
"RUBY_HEAP_MIN_SLOTS is obsolete. Use RUBY_GC_HEAP_INIT_SLOTS instead.");
7571 gc_set_initial_pages();
7574 get_envparam_double(
"RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.
growth_factor, 1.0, 0.0,
FALSE);
7575 get_envparam_size (
"RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.
growth_max_slots, 0);
7584 get_envparam_size (
"RUBY_GC_MALLOC_LIMIT", &gc_params.
malloc_limit_min, 0);
7585 get_envparam_size (
"RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.
malloc_limit_max, 0);
7588 #if RGENGC_ESTIMATE_OLDMALLOC 7603 if (is_markable_object(objspace, obj)) {
7604 struct mark_func_data_struct mfd;
7605 mfd.mark_func = func;
7608 gc_mark_children(objspace, obj);
7615 void (*func)(
const char *category,
VALUE,
void *);
7620 root_objects_from(
VALUE obj,
void *ptr)
7631 struct mark_func_data_struct mfd;
7634 data.
data = passing_data;
7636 mfd.mark_func = root_objects_from;
7640 gc_mark_roots(objspace, &data.
category);
7651 negative_size_allocation_error_with_gvl(
void *ptr)
7658 negative_size_allocation_error(
const char *msg)
7668 fprintf(stderr,
"[FATAL] %s\n", msg);
7675 ruby_memerror_body(
void *dummy)
7693 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
7706 if (
during_gc) gc_exit(objspace,
"rb_memerror");
7711 fprintf(stderr,
"[FATAL] failed to allocate memory\n");
static void *
aligned_malloc(size_t alignment, size_t size)
{
    void *res;

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
#elif defined _WIN32
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&res, alignment, size) == 0) {
        return res;
    }
    else {
        return NULL;
    }
#elif defined(HAVE_MEMALIGN)
    res = memalign(alignment, size);
#else
    char *aligned;
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;
#endif

    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);
    return res;
}

static void
aligned_free(void *ptr)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
    free(ptr);
#else
    free(((void**)ptr)[-1]);
#endif
}
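/*
 * A standalone sketch (not part of gc.c) of the portable fallback branch of
 * aligned_malloc()/aligned_free() above, for platforms with no native
 * aligned allocator: over-allocate, round the payload pointer up to the
 * requested power-of-two alignment, and stash the raw malloc() pointer one
 * word before the payload so the matching free can recover it.  Function
 * names are illustrative only.
 */
static void *
portable_aligned_malloc(size_t alignment, size_t size)
{
    char *raw = malloc(alignment + size + sizeof(void *));
    char *aligned;

    if (raw == NULL) return NULL;
    aligned = raw + alignment + sizeof(void *);
    aligned -= ((VALUE)aligned & (alignment - 1)); /* round down to alignment */
    ((void **)aligned)[-1] = raw;                  /* remember the raw pointer */
    return aligned;
}

static void
portable_aligned_free(void *ptr)
{
    free(((void **)ptr)[-1]);                      /* undo the bookkeeping above */
}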
static inline size_t
objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
{
#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);
#else
    return hint;
#endif
}

static void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    while (1) {
        size_t val = *var;
        if (val < sub) sub = val;
        if (ATOMIC_SIZE_CAS(*var, val, val - sub) == val) break;
    }
}
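/*
 * A restatement sketch (not part of gc.c) of what atomic_sub_nounderflow()
 * above is doing, written against the GCC/Clang __atomic builtins instead
 * of Ruby's ATOMIC_SIZE_CAS macro: clamp the amount to the current value so
 * a racing subtraction can never wrap the counter below zero.
 */
static void
sub_nounderflow(size_t *var, size_t sub)
{
    if (sub == 0) return;

    for (;;) {
        size_t val = __atomic_load_n(var, __ATOMIC_SEQ_CST);
        if (val < sub) sub = val;                /* never subtract more than we have */
        if (__atomic_compare_exchange_n(var, &val, val - sub,
                                        0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            break;                               /* CAS succeeded; counter updated */
        }
        /* CAS failed: another thread changed *var, retry with a fresh read */
    }
}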
7810 objspace_malloc_increase(
rb_objspace_t *objspace,
void *mem,
size_t new_size,
size_t old_size,
enum memop_type type)
7812 if (new_size > old_size) {
7814 #if RGENGC_ESTIMATE_OLDMALLOC 7820 #if RGENGC_ESTIMATE_OLDMALLOC 7836 #if MALLOC_ALLOCATED_SIZE 7837 if (new_size >= old_size) {
7841 size_t dec_size = old_size - new_size;
7842 size_t allocated_size = objspace->
malloc_params.allocated_size;
7844 #if MALLOC_ALLOCATED_SIZE_CHECK 7845 if (allocated_size < dec_size) {
7846 rb_bug(
"objspace_malloc_increase: underflow malloc_params.allocated_size.");
7849 atomic_sub_nounderflow(&objspace->
malloc_params.allocated_size, dec_size);
7852 if (0) fprintf(stderr,
"increase - ptr: %p, type: %s, new_size: %d, old_size: %d\n",
7857 (
int)new_size, (
int)old_size);
7866 if (allocations > 0) {
7867 atomic_sub_nounderflow(&objspace->
malloc_params.allocations, 1);
7869 #if MALLOC_ALLOCATED_SIZE_CHECK 7881 static inline size_t 7882 objspace_malloc_prepare(
rb_objspace_t *objspace,
size_t size)
7884 if (size == 0) size = 1;
7886 #if CALC_EXACT_MALLOC_SIZE 7887 size +=
sizeof(size_t);
7893 static inline void *
7894 objspace_malloc_fixup(
rb_objspace_t *objspace,
void *mem,
size_t size)
7896 size = objspace_malloc_size(objspace, mem, size);
7899 #if CALC_EXACT_MALLOC_SIZE 7900 ((
size_t *)mem)[0] =
size;
7901 mem = (
size_t *)mem + 1;
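/*
 * A standalone sketch (not part of gc.c) of the CALC_EXACT_MALLOC_SIZE
 * bookkeeping performed by objspace_malloc_prepare()/objspace_malloc_fixup()
 * above: reserve one size_t in front of every allocation, record the size
 * there, and hand the caller the address just past that header.  Simplified
 * and without the objspace accounting; function names are illustrative.
 */
static void *
exact_size_malloc(size_t size)
{
    size_t *mem = malloc(size + sizeof(size_t));
    if (mem == NULL) return NULL;
    mem[0] = size;              /* header: remember the requested size */
    return mem + 1;             /* caller only ever sees the payload */
}

static size_t
exact_size_of(void *ptr)
{
    return ((size_t *)ptr)[-1]; /* read the header back from before the payload */
}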
7907 #define TRY_WITH_GC(alloc) do { \ 7908 objspace_malloc_gc_stress(objspace); \ 7910 (!garbage_collect_with_gvl(objspace, TRUE, TRUE, TRUE, GPR_FLAG_MALLOC) || \ 7924 size = objspace_malloc_prepare(objspace, size);
7926 return objspace_malloc_fixup(objspace, mem, size);
static inline size_t
xmalloc2_size(const size_t count, const size_t elsize)
{
    size_t ret;
    if (rb_mul_size_overflow(count, elsize, SSIZE_MAX, &ret)) {
        ruby_malloc_size_overflow(count, elsize);
    }
    return ret;
}
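/*
 * A sketch (not part of gc.c) of the overflow check xmalloc2_size() relies
 * on, expressed with the GCC/Clang __builtin_mul_overflow primitive rather
 * than rb_mul_size_overflow(): refuse to compute count * elsize when the
 * product cannot be represented, capped at SSIZE_MAX as above.  The helper
 * name is illustrative only.
 */
static size_t
checked_alloc_size(size_t count, size_t elsize)
{
    size_t ret;
    if (__builtin_mul_overflow(count, elsize, &ret) || ret > (size_t)SSIZE_MAX) {
        ruby_malloc_size_overflow(count, elsize);  /* raises, does not return */
    }
    return ret;
}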
7940 objspace_xrealloc(
rb_objspace_t *objspace,
void *ptr,
size_t new_size,
size_t old_size)
7944 if (!ptr)
return objspace_xmalloc0(objspace, new_size);
7951 if (new_size == 0) {
7952 objspace_xfree(objspace, ptr, old_size);
7956 #if CALC_EXACT_MALLOC_SIZE 7957 new_size +=
sizeof(size_t);
7958 ptr = (
size_t *)ptr - 1;
7959 old_size = ((
size_t *)ptr)[0];
7962 old_size = objspace_malloc_size(objspace, ptr, old_size);
7964 new_size = objspace_malloc_size(objspace, mem, new_size);
7966 #if CALC_EXACT_MALLOC_SIZE 7967 ((
size_t *)mem)[0] = new_size;
7968 mem = (
size_t *)mem + 1;
7977 objspace_xfree(
rb_objspace_t *objspace,
void *ptr,
size_t old_size)
7979 #if CALC_EXACT_MALLOC_SIZE 7980 ptr = ((
size_t *)ptr) - 1;
7981 old_size = ((
size_t*)ptr)[0];
7983 old_size = objspace_malloc_size(objspace, ptr, old_size);
7987 objspace_malloc_increase(objspace, ptr, 0, old_size,
MEMOP_TYPE_FREE);
7991 ruby_xmalloc0(
size_t size)
7993 return objspace_xmalloc0(&rb_objspace, size);
7999 if ((ssize_t)size < 0) {
8000 negative_size_allocation_error(
"too large allocation size");
8002 return ruby_xmalloc0(size);
8016 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
8024 size = objspace_malloc_prepare(objspace, size);
8026 return objspace_malloc_fixup(objspace, mem, size);
8032 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
8035 #ifdef ruby_sized_xrealloc 8036 #undef ruby_sized_xrealloc 8041 if ((ssize_t)new_size < 0) {
8042 negative_size_allocation_error(
"too large allocation size");
8045 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
8054 #ifdef ruby_sized_xrealloc2 8055 #undef ruby_sized_xrealloc2 8060 size_t len = size * n;
8061 if (n != 0 && size != len / n) {
8064 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
8073 #ifdef ruby_sized_xfree 8074 #undef ruby_sized_xfree 8080 objspace_xfree(&rb_objspace, x, size);
8097 #if CALC_EXACT_MALLOC_SIZE 8098 size +=
sizeof(size_t);
8101 #if CALC_EXACT_MALLOC_SIZE 8103 ((
size_t *)mem)[0] = 0;
8104 mem = (
size_t *)mem + 1;
8112 size_t *mem = (
size_t *)ptr;
8113 #if CALC_EXACT_MALLOC_SIZE 8127 ptr = ruby_xmalloc0(size);
8140 if (len < 0 || (cnt = (
long)
roomof(len,
sizeof(
VALUE))) < 0) {
8153 RNODE(s)->u3.cnt = 0;
8158 #if MALLOC_ALLOCATED_SIZE 8169 gc_malloc_allocated_size(
VALUE self)
8184 gc_malloc_allocations(
VALUE self)
8197 else if (diff < 0) {
8212 #define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0 8214 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK 8220 if (!is_live_object(objspace, obj))
return ST_DELETE;
8226 wmap_mark(
void *ptr)
8229 #if WMAP_DELETE_DEAD_OBJECT_IN_MARK 8244 wmap_free(
void *ptr)
8256 *(
size_t *)arg += (ptr[0] + 1) *
sizeof(
VALUE);
8261 wmap_memsize(
const void *ptr)
8264 const struct weakmap *w = ptr;
8283 wmap_allocate(
VALUE klass)
8297 if (!existing)
return ST_STOP;
8299 for (i = j = 1, size = ptr[0]; i <=
size; ++i) {
8300 if (ptr[i] != wmap) {
8330 rids = (
VALUE *)data;
8332 for (i = 0; i <
size; ++i) {
8377 wmap_inspect(
VALUE self)
8398 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8406 wmap_each(
VALUE self)
8421 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8429 wmap_each_key(
VALUE self)
8444 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8452 wmap_each_value(
VALUE self)
8469 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8477 wmap_keys(
VALUE self)
8496 if (is_id_value(objspace, obj) && is_live_object(objspace, obj)) {
8504 wmap_values(
VALUE self)
8521 size = (ptr = optr = (
VALUE *)*val)[0];
8528 ptr = ruby_xmalloc0(2 *
sizeof(
VALUE));
8532 if (ptr == optr)
return ST_STOP;
8544 should_be_finalizable(orig);
8545 should_be_finalizable(wmap);
8546 define_final0(orig, w->
final);
8547 define_final0(wmap, w->
final);
8565 if (!is_id_value(objspace, obj))
return Qnil;
8566 if (!is_live_object(objspace, obj))
return Qnil;
8578 wmap_size(
VALUE self)
8585 #if SIZEOF_ST_INDEX_T <= SIZEOF_LONG 8596 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100 8600 getrusage_time(
void)
8602 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID) 8604 static int try_clock_gettime = 1;
8606 if (try_clock_gettime &&
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
8610 try_clock_gettime = 0;
8617 struct rusage usage;
8619 if (
getrusage(RUSAGE_SELF, &usage) == 0) {
8620 time = usage.ru_utime;
8628 FILETIME creation_time, exit_time, kernel_time, user_time;
8633 if (GetProcessTimes(GetCurrentProcess(),
8634 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8635 memcpy(&ui, &user_time,
sizeof(FILETIME));
8636 q = ui.QuadPart / 10L;
8637 t = (
DWORD)(q % 1000000L) * 1e-6;
8642 t += (double)(
DWORD)(q >> 16) * (1 << 16);
8643 t += (
DWORD)q & ~(~0 << 16);
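/*
 * A compact sketch (not part of gc.c) of the portable CPU-time probe
 * getrusage_time() implements above: prefer
 * clock_gettime(CLOCK_PROCESS_CPUTIME_ID) when available, fall back to
 * getrusage(RUSAGE_SELF) otherwise.  The Windows branch is omitted and the
 * function name is illustrative only.
 */
static double
process_cpu_time(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;    /* nanosecond-resolution path */
    }
#endif
#ifdef RUSAGE_SELF
    {
        struct rusage usage;
        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            return usage.ru_utime.tv_sec + usage.ru_utime.tv_usec * 1e-6;
        }
    }
#endif
    return 0.0;                                  /* no usable clock found */
}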
8654 gc_prof_setup_new_record(
rb_objspace_t *objspace,
int reason)
8675 rb_bug(
"gc_profile malloc or realloc miss");
8682 #if MALLOC_ALLOCATED_SIZE 8685 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY 8688 struct rusage usage;
8689 if (
getrusage(RUSAGE_SELF, &usage) == 0) {
8690 record->maxrss = usage.ru_maxrss;
8691 record->minflt = usage.ru_minflt;
8692 record->majflt = usage.ru_majflt;
8705 #if GC_PROFILE_MORE_DETAIL 8706 record->prepare_time = objspace->
profile.prepare_time;
8714 elapsed_time_from(
double time)
8716 double now = getrusage_time();
8735 #define RUBY_DTRACE_GC_HOOK(name) \ 8736 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0) 8741 #if GC_PROFILE_MORE_DETAIL 8752 #if GC_PROFILE_MORE_DETAIL 8755 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
8785 record->
gc_time += sweep_time;
8791 #if GC_PROFILE_MORE_DETAIL 8792 record->gc_sweep_time += sweep_time;
8802 #if GC_PROFILE_MORE_DETAIL 8819 #if GC_PROFILE_MORE_DETAIL 8821 record->heap_live_objects = live;
8822 record->heap_free_objects = total - live;
8840 gc_profile_clear(
void)
8907 gc_profile_record_get(
void)
8929 #if GC_PROFILE_MORE_DETAIL 8944 #if RGENGC_PROFILE > 0 8955 #if GC_PROFILE_MORE_DETAIL 8956 #define MAJOR_REASON_MAX 0x10 8959 gc_profile_dump_major_reason(
int flags,
char *buff)
8970 if (reason & GPR_FLAG_MAJOR_BY_##x) { \ 8971 buff[i++] = #x[0]; \ 8972 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \ 8978 #if RGENGC_ESTIMATE_OLDMALLOC 8992 #ifdef MAJOR_REASON_MAX 8993 char reason_str[MAJOR_REASON_MAX];
9001 append(out,
rb_str_new_cstr(
"Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
9003 for (i = 0; i <
count; i++) {
9010 #if GC_PROFILE_MORE_DETAIL 9013 "Prepare Time = Previously GC's rest sweep time\n" 9014 "Index Flags Allocate Inc. Allocate Limit" 9018 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj" 9020 " OldgenObj RemNormObj RemShadObj" 9023 " MaxRSS(KB) MinorFLT MajorFLT" 9027 for (i = 0; i <
count; i++) {
9043 gc_profile_dump_major_reason(record->
flags, reason_str),
9050 record->allocate_increase, record->allocate_limit,
9052 record->allocated_size,
9054 record->heap_use_pages,
9055 record->gc_mark_time*1000,
9056 record->gc_sweep_time*1000,
9057 record->prepare_time*1000,
9059 record->heap_live_objects,
9060 record->heap_free_objects,
9061 record->removing_objects,
9062 record->empty_objects
9065 record->old_objects,
9066 record->remembered_normal_objects,
9067 record->remembered_shady_objects
9071 record->maxrss / 1024,
9094 gc_profile_result(
void)
9111 gc_profile_report(
int argc,
VALUE *argv,
VALUE self)
9134 gc_profile_total_time(
VALUE self)
9143 for (i = 0; i <
count; i++) {
9158 gc_profile_enable_get(
VALUE self)
9173 gc_profile_enable(
void)
9190 gc_profile_disable(
void)
9204 type_name(
int type,
VALUE obj)
9207 #define TYPE_NAME(t) case (t): return #t; 9244 obj_type_name(
VALUE obj)
9246 return type_name(
TYPE(obj), obj);
9266 rb_bug(
"method_type_name: unreachable (type: %d)", type);
9270 # define ARY_SHARED_P(ary) \ 9271 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \ 9272 FL_TEST((ary),ELTS_SHARED)!=0) 9273 # define ARY_EMBED_P(ary) \ 9274 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \ 9275 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0) 9278 rb_raw_iseq_info(
char *buff,
const int buff_size,
const rb_iseq_t *iseq)
9282 snprintf(buff, buff_size,
"%s %s@%s:%d", buff,
9293 snprintf(buff, buff_size,
"%s", obj_type_name(obj));
9296 #define TF(c) ((c) != 0 ? "true" : "false") 9297 #define C(c, s) ((c) != 0 ? (s) : " ") 9300 const int age = RVALUE_FLAGS_AGE(
RBASIC(obj)->flags);
9302 snprintf(buff, buff_size,
"%p [%d%s%s%s%s] %s",
9308 obj_type_name(obj));
9310 snprintf(buff, buff_size,
"%p [%s] %s",
9313 obj_type_name(obj));
9316 if (internal_object_p(obj)) {
9319 else if (
RBASIC(obj)->klass == 0) {
9320 snprintf(buff, buff_size,
"%s (temporary internal)", buff);
9324 if (!
NIL_P(class_path)) {
9330 snprintf(buff, buff_size,
"%s @%s:%d", buff,
RANY(obj)->file,
RANY(obj)->line);
9335 snprintf(buff, buff_size,
"%s (%s)", buff,
9339 snprintf(buff, buff_size,
"%s [%s%s] len: %d", buff,
9350 if (!
NIL_P(class_path)) {
9358 rb_raw_iseq_info(buff, buff_size, iseq);
9363 snprintf(buff, buff_size,
"%s %s", buff, type_name);
9369 const char *imemo_name;
9371 #define IMEMO_NAME(x) case imemo_##x: imemo_name = #x; break; 9384 snprintf(buff, buff_size,
"%s %s", buff, imemo_name);
9389 snprintf(buff, buff_size,
"%s (called_id: %s, type: %s, alias: %d, owner: %s, defined_class: %s)", buff,
9391 method_type_name(me->
def->type),
9392 me->
def->alias_count,
9393 obj_info(me->
owner),
9399 rb_raw_iseq_info(buff, buff_size, iseq);
9416 #define OBJ_INFO_BUFFERS_NUM 10 9417 #define OBJ_INFO_BUFFERS_SIZE 0x100 9418 static int obj_info_buffers_index = 0;
9419 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
9424 const int index = obj_info_buffers_index++;
9425 char *
const buff = &obj_info_buffers[index][0];
9427 if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
9428 obj_info_buffers_index = 0;
9437 return obj_type_name(obj);
9444 if (!rb_special_const_p(obj)) {
9445 return obj_info(obj);
9448 return obj_type_name(obj);
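/*
 * A sketch (not part of gc.c) of the round-robin buffer scheme obj_info()
 * uses above: a small fixed pool of static buffers handed out in rotation,
 * so several obj_info() results can coexist inside one rb_bug()/gc_report()
 * format call without any heap allocation.  It reuses the pool defined
 * above; the helper name and its private index are illustrative only.
 */
static const char *
next_debug_buffer(void)
{
    static int index = 0;
    char *const buff = &obj_info_buffers[index][0];

    if (++index >= OBJ_INFO_BUFFERS_NUM) index = 0;  /* wrap around the pool */
    return buff;
}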
9456 fprintf(stderr,
"rb_obj_info_dump: %s\n",
rb_raw_obj_info(buff, 0x100, obj));
9466 fprintf(stderr,
"created at: %s:%d\n",
RANY(obj)->file,
RANY(obj)->line);
9468 if (is_pointer_to_heap(objspace, (
void *)obj)) {
9469 fprintf(stderr,
"pointer to heap?: true\n");
9472 fprintf(stderr,
"pointer to heap?: false\n");
9478 fprintf(stderr,
"age? : %d\n", RVALUE_AGE(obj));
9479 fprintf(stderr,
"old? : %s\n", RVALUE_OLD_P(obj) ?
"true" :
"false");
9480 fprintf(stderr,
"WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ?
"false" :
"true");
9481 fprintf(stderr,
"remembered? : %s\n", RVALUE_REMEMBERED(obj) ?
"true" :
"false");
9485 fprintf(stderr,
"lazy sweeping?: true\n");
9486 fprintf(stderr,
"swept?: %s\n", is_swept_object(objspace, obj) ?
"done" :
"not yet");
9489 fprintf(stderr,
"lazy sweeping?: false\n");
9496 fprintf(stderr,
"WARNING: object %s(%p) is inadvertently collected\n", (
char *)name, (
void *)obj);
9501 rb_gcdebug_sentinel(
VALUE obj,
const char *name)
9508 #if GC_DEBUG_STRESS_TO_CLASS 9510 rb_gcdebug_add_stress_to_class(
int argc,
VALUE *argv,
VALUE self)
9522 rb_gcdebug_remove_stress_to_class(
int argc,
VALUE *argv,
VALUE self)
9528 for (i = 0; i <
argc; ++i) {
9679 #if MALLOC_ALLOCATED_SIZE 9684 #if GC_DEBUG_STRESS_TO_CLASS 9693 #define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))