/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct  5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby/re.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/debug.h"
#include "eval_intern.h"
#include "vm_core.h"
#include "internal.h"
#include "gc.h"
#include "constant.h"
#include "ruby_atomic.h"
#include "probes.h"
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>
#include <sys/types.h>
#include <assert.h>

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#if defined(__native_client__) && defined(NACL_NEWLIB)
# include "nacl/resource.h"
# undef HAVE_POSIX_MEMALIGN
# undef HAVE_MEMALIGN

#endif

#if defined _WIN32 || defined __CYGWIN__
#include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
#include <malloc.h>
#endif

#ifdef HAVE_VALGRIND_MEMCHECK_H
# include <valgrind/memcheck.h>
# ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(p, n) VALGRIND_MAKE_READABLE((p), (n))
# endif
# ifndef VALGRIND_MAKE_MEM_UNDEFINED
#  define VALGRIND_MAKE_MEM_UNDEFINED(p, n) VALGRIND_MAKE_WRITABLE((p), (n))
# endif
#else
# define VALGRIND_MAKE_MEM_DEFINED(p, n) 0
# define VALGRIND_MAKE_MEM_UNDEFINED(p, n) 0
#endif

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t

#if defined(HAVE_RB_GC_GUARDED_PTR) && HAVE_RB_GC_GUARDED_PTR
volatile VALUE *
rb_gc_guarded_ptr(volatile VALUE *ptr)
{
    return ptr;
}
#endif

#ifndef GC_MALLOC_LIMIT
#define GC_MALLOC_LIMIT 8000000
#endif
#define HEAP_MIN_SLOTS 10000
#define FREE_MIN  4096
#define HEAP_GROWTH_FACTOR 1.8

typedef struct {
    unsigned int initial_malloc_limit;
    unsigned int initial_heap_min_slots;
    unsigned int initial_free_min;
    double initial_growth_factor;
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    VALUE gc_stress;
#endif
} ruby_gc_params_t;

static ruby_gc_params_t initial_params = {
    GC_MALLOC_LIMIT,
    HEAP_MIN_SLOTS,
    FREE_MIN,
    HEAP_GROWTH_FACTOR,
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
    FALSE,
#endif
};

#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]

void rb_gcdebug_print_obj_condition(VALUE obj);

#if USE_RGENGC
/* RGENGC_DEBUG:
 * 1: basic information
 * 2: remember set operation
 * 3: mark
 * 4:
 * 5: sweep
 */
#ifndef RGENGC_DEBUG
#define RGENGC_DEBUG       0
#endif

/* RGENGC_CHECK_MODE
 * 0: disable all assertions
 * 1: enable assertions (to debug RGenGC)
 * 2: enable bits check (for debugging)
 * 3: show all references
 */
#ifndef RGENGC_CHECK_MODE
#define RGENGC_CHECK_MODE  0
#endif
/* RGENGC_PROFILE
 * 0: disable RGenGC profiling
 * 1: enable profiling for basic information
 * 2: enable profiling for each type
 */
#ifndef RGENGC_PROFILE
#define RGENGC_PROFILE     0
#endif

#else /* USE_RGENGC */
#define RGENGC_DEBUG       0
#define RGENGC_CHECK_MODE  0
#define RGENGC_PROFILE     0
#endif

#ifndef GC_PROFILE_MORE_DETAIL
#define GC_PROFILE_MORE_DETAIL 0
#endif
#ifndef GC_ENABLE_LAZY_SWEEP
#define GC_ENABLE_LAZY_SWEEP   1
#endif
#ifndef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE 0
#endif
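
/*
 * All of the knobs above are #ifndef-guarded, so they can be overridden
 * at build time without editing this file.  A minimal sketch, assuming a
 * typical autoconf-based build (the exact invocation may differ):
 *
 *   $ ./configure CPPFLAGS='-DRGENGC_DEBUG=2 -DRGENGC_CHECK_MODE=1'
 *   $ make
 *
 * or, to rebuild just this translation unit with extra profiling:
 *
 *   $ make gc.o CPPFLAGS='-DGC_PROFILE_MORE_DETAIL=1'
 */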

typedef enum {
    GPR_FLAG_NONE            = 0x000,
    /* major reason */
    GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY  = 0x004,
    GPR_FLAG_MAJOR_BY_RESCAN = 0x008,
    GPR_FLAG_MAJOR_BY_STRESS = 0x010,
    GPR_FLAG_MAJOR_MASK      = 0x01f,

    /* gc reason */
    GPR_FLAG_NEWOBJ          = 0x020,
    GPR_FLAG_MALLOC          = 0x040,
    GPR_FLAG_METHOD          = 0x080,
    GPR_FLAG_CAPI            = 0x100,
    GPR_FLAG_STRESS          = 0x200,

    /* others */
    GPR_FLAG_IMMEDIATE_SWEEP = 0x400,
    GPR_FLAG_HAVE_FINALIZE   = 0x800

} gc_profile_record_flag;
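
/*
 * A profile record's "flags" field packs one major-GC reason, a trigger,
 * and modifier bits into a single int.  An illustrative (hypothetical)
 * value for a major GC triggered from object allocation, with immediate
 * sweep requested, would be composed as:
 *
 *   int reason   = GPR_FLAG_MAJOR_BY_OLDGEN | GPR_FLAG_NEWOBJ
 *                | GPR_FLAG_IMMEDIATE_SWEEP;
 *   int is_major = reason & GPR_FLAG_MAJOR_MASK;   (non-zero for major GC)
 */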

typedef struct gc_profile_record {
    int flags;

    double gc_time;
    double gc_invoke_time;

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;

#if GC_PROFILE_MORE_DETAIL
    double gc_mark_time;
    double gc_sweep_time;

    size_t heap_use_slots;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;
#if CALC_EXACT_MALLOC_SIZE
    size_t allocated_size;
#endif

    double prepare_time;
    size_t removing_objects;
    size_t empty_objects;
#endif

#if RGENGC_PROFILE > 0
    size_t oldgen_objects;
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;
#endif
} gc_profile_record;

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(push, 1) /* magic for reducing sizeof(RVALUE): 24 -> 20 */
#endif

typedef struct RVALUE {
    union {
        struct {
            VALUE flags;                /* always 0 for freed obj */
            struct RVALUE *next;
        } free;
        struct RBasic  basic;
        struct RObject object;
        struct RClass  klass;
        struct RFloat  flonum;
        struct RString string;
        struct RArray  array;
        struct RRegexp regexp;
        struct RHash   hash;
        struct RData   data;
        struct RTypedData   typeddata;
        struct RStruct rstruct;
        struct RBignum bignum;
        struct RFile   file;
        struct RNode   node;
        struct RMatch  match;
        struct RRational rational;
        struct RComplex complex;
        struct {
            struct RBasic basic;
            VALUE v1;
            VALUE v2;
            VALUE v3;
        } values;
    } as;
#ifdef GC_DEBUG
    const char *file;
    int   line;
#endif
} RVALUE;

#if defined(_MSC_VER) || defined(__BORLANDC__) || defined(__CYGWIN__)
#pragma pack(pop)
#endif

typedef uintptr_t bits_t;
enum {
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};

struct heaps_header {
    struct heaps_slot *base;
    RVALUE *start;
    RVALUE *end;
    size_t limit;
};

struct gc_list {
    VALUE *varptr;
    struct gc_list *next;
};

#define STACK_CHUNK_SIZE 500

typedef struct stack_chunk {
    VALUE data[STACK_CHUNK_SIZE];
    struct stack_chunk *next;
} stack_chunk_t;

typedef struct mark_stack {
    stack_chunk_t *chunk;
    stack_chunk_t *cache;
    size_t index;
    size_t limit;
    size_t cache_size;
    size_t unused_cache_size;
} mark_stack_t;

typedef struct rb_objspace {
    struct {
        size_t limit;
        size_t increase;
        size_t increase2;
#if CALC_EXACT_MALLOC_SIZE
        size_t allocated_size;
        size_t allocations;
#endif
    } malloc_params;
    struct {
        size_t increment;
        struct heaps_slot *ptr;
        struct heaps_slot *sweep_slots;
        struct heaps_slot *free_slots;
        struct heaps_header **sorted;
        size_t length;
        size_t used;
        RVALUE *range[2];
        struct heaps_header *freed;
        size_t free_num;
        size_t free_min;
        size_t final_num;
        size_t do_heap_free;
    } heap;
    struct {
        int dont_gc;
        int dont_lazy_sweep;
        int during_gc;
        rb_atomic_t finalizing;
    } flags;
    struct {
        st_table *table;
        RVALUE *deferred;
    } final;
    mark_stack_t mark_stack;
    struct {
        int run;
        gc_profile_record *records;
        gc_profile_record *current_record;
        size_t next_index;
        size_t size;

#if GC_PROFILE_MORE_DETAIL
        double prepare_time;
#endif
        double invoke_time;

#if USE_RGENGC
        size_t minor_gc_count;
        size_t major_gc_count;
#ifdef RGENGC_PROFILE
        size_t generated_normal_object_count;
        size_t generated_shady_object_count;
        size_t shade_operation_count;
        size_t promote_operation_count;
        size_t remembered_normal_object_count;
        size_t remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
        size_t generated_normal_object_count_types[RUBY_T_MASK];
        size_t generated_shady_object_count_types[RUBY_T_MASK];
        size_t shade_operation_count_types[RUBY_T_MASK];
        size_t promote_operation_count_types[RUBY_T_MASK];
        size_t remembered_normal_object_count_types[RUBY_T_MASK];
        size_t remembered_shady_object_count_types[RUBY_T_MASK];
#endif
#endif /* RGENGC_PROFILE */
#endif /* USE_RGENGC */

        /* temporary profiling space */
        double gc_sweep_start_time;
        size_t total_allocated_object_num_at_gc_start;
        size_t heaps_used_at_gc_start;
    } profile;
    struct gc_list *global_list;
    size_t count;
    size_t total_allocated_object_num;
    size_t total_freed_object_num;
    rb_event_flag_t hook_events; /* this field's placement may affect memory-cache locality */
    VALUE gc_stress;

    struct mark_func_data_struct {
        void *data;
        void (*mark_func)(VALUE v, void *data);
    } *mark_func_data;

#if USE_RGENGC
    struct {
        int during_minor_gc;
        int parent_object_is_promoted;

        /* for check mode */
        VALUE parent_object;
        unsigned int  monitor_level;
        st_table *monitored_object_table;

        int need_major_gc;
        size_t remembered_shady_object_count;
        size_t remembered_shady_object_limit;
        size_t oldgen_object_count;
        size_t oldgen_object_limit;
#if RGENGC_CHECK_MODE >= 2
        int have_saved_bitmaps;
#endif
    } rgengc;
#endif /* USE_RGENGC */
} rb_objspace_t;


#ifndef HEAP_ALIGN_LOG
/* default tiny heap size: 16KB */
#define HEAP_ALIGN_LOG 14
#endif
#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
enum {
    HEAP_ALIGN = (1UL << HEAP_ALIGN_LOG),
    HEAP_ALIGN_MASK = (~(~0UL << HEAP_ALIGN_LOG)),
    REQUIRED_SIZE_BY_MALLOC = (sizeof(size_t) * 5),
    HEAP_SIZE = (HEAP_ALIGN - REQUIRED_SIZE_BY_MALLOC),
    HEAP_OBJ_LIMIT = (unsigned int)((HEAP_SIZE - sizeof(struct heaps_header))/sizeof(struct RVALUE)),
    HEAP_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
    HEAP_BITMAP_SIZE = ( BITS_SIZE * HEAP_BITMAP_LIMIT),
    HEAP_BITMAP_PLANES = USE_RGENGC ? 3 : 1 /* RGENGC: mark bits, rememberset bits and oldgen bits */
};
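
/*
 * A worked example of the geometry above, assuming a typical LP64 build
 * (HEAP_ALIGN_LOG == 14, sizeof(size_t) == 8, sizeof(RVALUE) == 40 and
 * sizeof(struct heaps_header) == 32; all platform-dependent):
 *
 *   HEAP_ALIGN              = 1 << 14           = 16384 bytes
 *   REQUIRED_SIZE_BY_MALLOC = 8 * 5             = 40 bytes
 *   HEAP_SIZE               = 16384 - 40        = 16344 bytes
 *   HEAP_OBJ_LIMIT          = (16344 - 32) / 40 = 407 objects per slot
 *   HEAP_BITMAP_LIMIT       = CEILDIV(CEILDIV(16344, 40), 64) = 7 words
 *   HEAP_BITMAP_SIZE        = 8 * 7             = 56 bytes per bitmap
 */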

struct heaps_slot {
    struct heaps_header *header;
    RVALUE *freelist;
    struct heaps_slot *next;
    struct heaps_slot *prev;
    struct heaps_slot *free_next;

    bits_t mark_bits[HEAP_BITMAP_LIMIT];
#if USE_RGENGC
    bits_t rememberset_bits[HEAP_BITMAP_LIMIT];
    bits_t oldgen_bits[HEAP_BITMAP_LIMIT];
#if RGENGC_CHECK_MODE >= 2
    bits_t saved_mark_bits[HEAP_BITMAP_LIMIT];
    bits_t saved_rememberset_bits[HEAP_BITMAP_LIMIT];
    bits_t saved_oldgen_bits[HEAP_BITMAP_LIMIT];
#endif
#endif
};

#define HEAP_HEADER(p)               ((struct heaps_header *)(p))
#define GET_HEAP_HEADER(x)           (HEAP_HEADER((bits_t)(x) & ~(HEAP_ALIGN_MASK)))
#define GET_HEAP_SLOT(x)             (GET_HEAP_HEADER(x)->base)
#define GET_HEAP_MARK_BITS(x)        (&GET_HEAP_SLOT(x)->mark_bits[0])
#define GET_HEAP_REMEMBERSET_BITS(x) (&GET_HEAP_SLOT(x)->rememberset_bits[0])
#define GET_HEAP_OLDGEN_BITS(x)      (&GET_HEAP_SLOT(x)->oldgen_bits[0])
#define NUM_IN_SLOT(p)               (((bits_t)(p) & HEAP_ALIGN_MASK)/sizeof(RVALUE))
#define BITMAP_INDEX(p)              (NUM_IN_SLOT(p) / BITS_BITLENGTH )
#define BITMAP_OFFSET(p)             (NUM_IN_SLOT(p) & (BITS_BITLENGTH-1))
#define BITMAP_BIT(p)                ((bits_t)1 << BITMAP_OFFSET(p))
/* Bitmap Operations */
#define MARKED_IN_BITMAP(bits, p)    ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
#define MARK_IN_BITMAP(bits, p)      ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
#define CLEAR_IN_BITMAP(bits, p)     ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
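
/*
 * A minimal sketch of how the macros above locate an object's bit, under
 * the same LP64 assumptions as before (sizeof(RVALUE) == 40,
 * BITS_BITLENGTH == 64).  For a (hypothetical) object p at byte offset
 * 0x2a30 inside its 16KB-aligned slot:
 *
 *   NUM_IN_SLOT(p)   = 0x2a30 / 40 = 270    object index within the slot
 *   BITMAP_INDEX(p)  = 270 / 64    = 4      which bits_t word
 *   BITMAP_OFFSET(p) = 270 & 63    = 14     which bit inside that word
 *   BITMAP_BIT(p)    = (bits_t)1 << 14
 *
 * so MARK_IN_BITMAP(GET_HEAP_MARK_BITS(p), p) ORs bit 14 into word 4 of
 * the slot's mark bitmap.
 */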

/* Aliases */
#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
#define rb_objspace (*GET_VM()->objspace)
#define ruby_initial_gc_stress  initial_params.gc_stress
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#else
static rb_objspace_t rb_objspace = {{GC_MALLOC_LIMIT}};
VALUE *ruby_initial_gc_stress_ptr = &rb_objspace.gc_stress;
#endif
#define malloc_limit            objspace->malloc_params.limit
#define malloc_increase         objspace->malloc_params.increase
#define malloc_increase2        objspace->malloc_params.increase2
#define malloc_allocated_size   objspace->malloc_params.allocated_size
#define heaps                   objspace->heap.ptr
#define heaps_length            objspace->heap.length
#define heaps_used              objspace->heap.used
#define lomem                   objspace->heap.range[0]
#define himem                   objspace->heap.range[1]
#define heaps_inc               objspace->heap.increment
#define heaps_freed             objspace->heap.freed
#define dont_gc                 objspace->flags.dont_gc
#define during_gc               objspace->flags.during_gc
#define finalizing              objspace->flags.finalizing
#define finalizer_table         objspace->final.table
#define deferred_final_list     objspace->final.deferred
#define global_List             objspace->global_list
#define ruby_gc_stress          objspace->gc_stress
#define initial_malloc_limit    initial_params.initial_malloc_limit
#define initial_heap_min_slots  initial_params.initial_heap_min_slots
#define initial_free_min        initial_params.initial_free_min
#define initial_growth_factor   initial_params.initial_growth_factor
#define monitor_level           objspace->rgengc.monitor_level
#define monitored_object_table  objspace->rgengc.monitored_object_table


#define is_lazy_sweeping(objspace) ((objspace)->heap.sweep_slots != 0)

#if SIZEOF_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
   ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported
#endif
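
/*
 * Illustration of the object-id encoding above for the SIZEOF_LONG ==
 * SIZEOF_VOIDP case: RVALUEs are pointer-aligned, so the low bit of an
 * object's address is always free to carry FIXNUM_FLAG (0x1).  For a
 * hypothetical object at address 0x7f30:
 *
 *   nonspecial_obj_id(obj) -> 0x7f30 | 1 = 0x7f31   (a Fixnum VALUE)
 *   obj_id_to_ref(0x7f31)  -> 0x7f31 ^ 1 = 0x7f30   (the pointer again)
 */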

#define RANY(o) ((RVALUE*)(o))
#define has_free_object (objspace->heap.free_slots && objspace->heap.free_slots->freelist)

int ruby_gc_debug_indent = 0;
VALUE rb_mGC;
extern st_table *rb_class_tbl;
int ruby_disable_gc_stress = 0;

static void rb_objspace_call_finalizer(rb_objspace_t *objspace);
static VALUE define_final0(VALUE obj, VALUE block);
VALUE rb_define_final(VALUE obj, VALUE block);
VALUE rb_undefine_final(VALUE obj);
static void run_final(rb_objspace_t *objspace, VALUE obj);
static void initial_expand_heap(rb_objspace_t *objspace);

static void negative_size_allocation_error(const char *);
static void *aligned_malloc(size_t, size_t);
static void aligned_free(void *);

static void init_mark_stack(mark_stack_t *stack);

static VALUE lazy_sweep_enable(void);
static int garbage_collect(rb_objspace_t *, int full_mark, int immediate_sweep, int reason);
static int garbage_collect_body(rb_objspace_t *, int full_mark, int immediate_sweep, int reason);
static int gc_prepare_free_objects(rb_objspace_t *);
static void mark_tbl(rb_objspace_t *, st_table *);
static void rest_sweep(rb_objspace_t *);
static void gc_mark_stacked_objects(rb_objspace_t *);

static void gc_mark(rb_objspace_t *objspace, VALUE ptr);
static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr);
static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);

static double getrusage_time(void);
static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, int reason);
static inline void gc_prof_timer_start(rb_objspace_t *);
static inline void gc_prof_timer_stop(rb_objspace_t *);
static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
static inline void gc_prof_set_malloc_info(rb_objspace_t *);
static inline void gc_prof_set_heap_info(rb_objspace_t *);

#define gc_prof_record(objspace) (objspace)->profile.current_record

static const char *obj_type_name(VALUE obj);

#if USE_RGENGC
static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace);
static void rgengc_rememberset_mark(rb_objspace_t *objspace);

#define FL_TEST2(x,f)         ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? (rb_bug("FL_TEST2: SPECIAL_CONST"), 0) : FL_TEST_RAW((x),(f)) != 0)
#define FL_SET2(x,f)          do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_SET2: SPECIAL_CONST");   RBASIC(x)->flags |= (f);} while (0)
#define FL_UNSET2(x,f)        do {if (RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) rb_bug("FL_UNSET2: SPECIAL_CONST"); RBASIC(x)->flags &= ~(f);} while (0)

#define RVALUE_SHADY(obj)    (!FL_TEST2((check_bitmap_consistency((VALUE)obj)), FL_WB_PROTECTED))
#define RVALUE_PROMOTED(obj) FL_TEST2(check_bitmap_consistency((VALUE)obj), FL_OLDGEN)

#define RVALUE_PROMOTED_FROM_BITMAP(x) MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(x),x)

static inline VALUE
check_bitmap_consistency(VALUE obj)
{
#if RGENGC_CHECK_MODE > 0
    int oldgen_bitmap = MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj) != 0;

    if (FL_TEST2((obj), FL_OLDGEN) != oldgen_bitmap) {
        rb_bug("check_bitmap_consistency: oldgen flag of %p (%s) is %d, but bitmap is %d",
               (void *)obj, obj_type_name(obj), FL_TEST2((obj), FL_OLDGEN), oldgen_bitmap);
    }
    if (FL_TEST2((obj), FL_WB_PROTECTED)) {
        /* non-shady */
    }
    else {
        /* shady */
        if (oldgen_bitmap) {
            rb_bug("check_bitmap_consistency: %p (%s) is shady, but bitmap specifies oldgen",
                   (void *)obj, obj_type_name(obj));
        }
    }
#endif
    return obj;
}

static inline void
RVALUE_PROMOTE(VALUE obj)
{
    check_bitmap_consistency(obj);
    MARK_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
    FL_SET2(obj, FL_OLDGEN);
#if RGENGC_PROFILE >= 1
    {
        rb_objspace_t *objspace = &rb_objspace;
        objspace->profile.promote_operation_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.promote_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
#endif
}

static inline int
is_before_sweep(VALUE obj)
{
    struct heaps_slot *slot;
    rb_objspace_t *objspace = &rb_objspace;
    if (is_lazy_sweeping(objspace)) {
        slot = objspace->heap.sweep_slots;
        while (slot) {
            if (slot->header == GET_HEAP_HEADER(obj))
                return TRUE;
            slot = slot->next;
        }
    }
    return FALSE;
}

static inline void
RVALUE_DEMOTE(VALUE obj)
{
    check_bitmap_consistency(obj);
    FL_UNSET2(obj, FL_OLDGEN);
    CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj), obj);
}
#endif

static void
rgengc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
{
    if (level <= RGENGC_DEBUG) {
        char buf[1024];
        FILE *out = stderr;
        va_list args;
        const char *status = " ";

#if USE_RGENGC
        if (during_gc) {
            status = objspace->rgengc.during_minor_gc ? "-" : "+";
        }
#endif

        va_start(args, fmt);
        vsnprintf(buf, 1024, fmt, args);
        va_end(args);

        fprintf(out, "%s|", status);
        fputs(buf, out);
    }
}

#define rgengc_report if (RGENGC_DEBUG) rgengc_report_body

/*
  --------------------------- ObjectSpace -----------------------------
*/

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
rb_objspace_t *
rb_objspace_alloc(void)
{
    rb_objspace_t *objspace = malloc(sizeof(rb_objspace_t));
    memset(objspace, 0, sizeof(*objspace));
    malloc_limit = initial_malloc_limit;
    ruby_gc_stress = ruby_initial_gc_stress;

    return objspace;
}
#endif

#if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
static void free_stack_chunks(mark_stack_t *);

void
rb_objspace_free(rb_objspace_t *objspace)
{
    rest_sweep(objspace);

    if (objspace->profile.records) {
        free(objspace->profile.records);
        objspace->profile.records = 0;
    }
    if (global_List) {
        struct gc_list *list, *next;
        for (list = global_List; list; list = next) {
            next = list->next;
            xfree(list);
        }
    }
    if (objspace->heap.sorted) {
        size_t i;
        for (i = 0; i < heaps_used; ++i) {
            aligned_free(objspace->heap.sorted[i]);
        }
        free(objspace->heap.sorted);
        heaps_used = 0;
        heaps = 0;
    }
    free_stack_chunks(&objspace->mark_stack);
    free(objspace);
}
#endif

void
rb_global_variable(VALUE *var)
{
    rb_gc_register_address(var);
}

static void
allocate_sorted_heaps(rb_objspace_t *objspace, size_t next_heaps_length)
{
    struct heaps_header **p;
    size_t size;

    size = next_heaps_length*sizeof(struct heaps_header *);

    if (heaps_used > 0) {
        p = (struct heaps_header **)realloc(objspace->heap.sorted, size);
        if (p) objspace->heap.sorted = p;
    }
    else {
        p = objspace->heap.sorted = (struct heaps_header **)malloc(size);
    }

    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }
}

static void
link_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
{
    slot->free_next = objspace->heap.free_slots;
    objspace->heap.free_slots = slot;
}

static void
unlink_free_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
{
    objspace->heap.free_slots = slot->free_next;
    slot->free_next = NULL;
}

static void
assign_heap_slot(rb_objspace_t *objspace)
{
    RVALUE *p, *pend, *membase;
    struct heaps_slot *slot;
    size_t hi, lo, mid;
    size_t objs;

    objs = HEAP_OBJ_LIMIT;

    p = (RVALUE*)aligned_malloc(HEAP_ALIGN, HEAP_SIZE);
    if (p == 0) {
        during_gc = 0;
        rb_memerror();
    }

    /* assign heaps_slot entry */
    slot = (struct heaps_slot *)malloc(sizeof(struct heaps_slot));
    if (slot == 0) {
       aligned_free(p);
       during_gc = 0;
       rb_memerror();
    }
    MEMZERO((void*)slot, struct heaps_slot, 1);

    slot->next = heaps;
    if (heaps) heaps->prev = slot;
    heaps = slot;

    /* adjust objs (the number of objects available in this slot) */
    membase = p;
    p = (RVALUE*)((VALUE)p + sizeof(struct heaps_header));
    if ((VALUE)p % sizeof(RVALUE) != 0) {
       p = (RVALUE*)((VALUE)p + sizeof(RVALUE) - ((VALUE)p % sizeof(RVALUE)));
       objs = (HEAP_SIZE - (size_t)((VALUE)p - (VALUE)membase))/sizeof(RVALUE);
    }

    /* setup objspace->heap.sorted */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        register RVALUE *mid_membase;
        mid = (lo + hi) / 2;
        mid_membase = (RVALUE *)objspace->heap.sorted[mid];
        if (mid_membase < membase) {
            lo = mid + 1;
        }
        else if (mid_membase > membase) {
            hi = mid;
        }
        else {
            rb_bug("same heap slot is allocated: %p at %"PRIuVALUE, (void *)membase, (VALUE)mid);
        }
    }
    if (hi < heaps_used) {
        MEMMOVE(&objspace->heap.sorted[hi+1], &objspace->heap.sorted[hi], struct heaps_header*, heaps_used - hi);
    }

    /* setup header */
    heaps->header = (struct heaps_header *)membase;
    objspace->heap.sorted[hi] = heaps->header;
    objspace->heap.sorted[hi]->start = p;
    objspace->heap.sorted[hi]->end = (p + objs);
    objspace->heap.sorted[hi]->base = heaps;
    objspace->heap.sorted[hi]->limit = objs;
    pend = p + objs;
    if (lomem == 0 || lomem > p) lomem = p;
    if (himem < pend) himem = pend;
    heaps_used++;

    while (p < pend) {
        p->as.free.flags = 0;
        rgengc_report(3, objspace, "assign_heap_slot: %p (%s) is added to freelist\n", p, obj_type_name((VALUE)p));
        p->as.free.next = heaps->freelist;
        heaps->freelist = p;
        p++;
    }

    link_free_heap_slot(objspace, heaps);
}

static void
add_heap_slots(rb_objspace_t *objspace, size_t add)
{
    size_t i;
    size_t next_heaps_length;

    next_heaps_length = heaps_used + add;

    if (next_heaps_length > heaps_length) {
        allocate_sorted_heaps(objspace, next_heaps_length);
        heaps_length = next_heaps_length;
    }

    for (i = 0; i < add; i++) {
        assign_heap_slot(objspace);
    }
    heaps_inc = 0;
}

static void
init_heap(rb_objspace_t *objspace)
{
    add_heap_slots(objspace, HEAP_MIN_SLOTS / HEAP_OBJ_LIMIT);
    init_mark_stack(&objspace->mark_stack);

#ifdef USE_SIGALTSTACK
    {
        /* altstacks of other threads are allocated elsewhere */
        rb_thread_t *th = GET_THREAD();
        void *tmp = th->altstack;
        th->altstack = malloc(rb_sigaltstack_size());
        free(tmp); /* free previously allocated area */
    }
#endif

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
}

static void
initial_expand_heap(rb_objspace_t *objspace)
{
    size_t min_size = initial_heap_min_slots / HEAP_OBJ_LIMIT;

    if (min_size > heaps_used) {
        add_heap_slots(objspace, min_size - heaps_used);
    }
}

static void
set_heaps_increment(rb_objspace_t *objspace)
{
    size_t next_heaps_length = (size_t)(heaps_used * initial_growth_factor);

    if (next_heaps_length == heaps_used) {
        next_heaps_length++;
    }

    heaps_inc = next_heaps_length - heaps_used;

    rgengc_report(5, objspace, "set_heaps_increment: heaps_length: %d, next_heaps_length: %d, heaps_inc: %d\n",
                  heaps_length, next_heaps_length, heaps_inc);

    if (next_heaps_length > heaps_length) {
        allocate_sorted_heaps(objspace, next_heaps_length);
        heaps_length = next_heaps_length;
    }
}

static int
heaps_increment(rb_objspace_t *objspace)
{
    rgengc_report(5, objspace, "heaps_increment: heaps_inc: %d\n", heaps_inc);

    if (heaps_inc > 0) {
        assign_heap_slot(objspace);
        heaps_inc--;
        return TRUE;
    }
    return FALSE;
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_objspace_t *objspace = &rb_objspace;
    objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
}

static void
gc_event_hook_body(rb_objspace_t *objspace, const rb_event_flag_t event, VALUE data)
{
    rb_thread_t *th = GET_THREAD();
    EXEC_EVENT_HOOK(th, event, th->cfp->self, 0, 0, data);
}

#define gc_event_hook(objspace, event, data) do { \
    if (UNLIKELY((objspace)->hook_events & (event))) { \
        gc_event_hook_body((objspace), (event), (data)); \
    } \
} while (0)
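
/*
 * A minimal usage sketch for the hook machinery above: a caller enables
 * the events it cares about via rb_objspace_set_event_hook(), after
 * which the allocation and free paths below fire gc_event_hook().  The
 * RUBY_INTERNAL_EVENT_* names are real flags from ruby/debug.h; the
 * wrapper function itself is illustrative only.
 */
#if 0
static void
enable_objspace_event_hooks_example(void)
{
    /* deliver NEWOBJ and FREEOBJ events to the registered internal
     * event hooks; bits outside RUBY_INTERNAL_EVENT_OBJSPACE_MASK are
     * masked off by rb_objspace_set_event_hook() */
    rb_objspace_set_event_hook(RUBY_INTERNAL_EVENT_NEWOBJ |
                               RUBY_INTERNAL_EVENT_FREEOBJ);
}
#endif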


static VALUE
newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE obj;

    if (UNLIKELY(during_gc)) {
        dont_gc = 1;
        during_gc = 0;
        rb_bug("object allocation during garbage collection phase");
    }

    if (UNLIKELY(ruby_gc_stress && !ruby_disable_gc_stress)) {
        if (!garbage_collect(objspace, FALSE, FALSE, GPR_FLAG_NEWOBJ)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    if (UNLIKELY(!has_free_object)) {
        if (!gc_prepare_free_objects(objspace)) {
            during_gc = 0;
            rb_memerror();
        }
    }

    obj = (VALUE)objspace->heap.free_slots->freelist;
    objspace->heap.free_slots->freelist = RANY(obj)->as.free.next;
    if (objspace->heap.free_slots->freelist == NULL) {
        unlink_free_heap_slot(objspace, objspace->heap.free_slots);
    }

    /* OBJSETUP */
    RBASIC(obj)->flags = flags;
    RBASIC_SET_CLASS(obj, klass);
    if (rb_safe_level() >= 3) FL_SET((obj), FL_TAINT);
    RANY(obj)->as.values.v1 = v1;
    RANY(obj)->as.values.v2 = v2;
    RANY(obj)->as.values.v3 = v3;

#ifdef GC_DEBUG
    RANY(obj)->file = rb_sourcefile();
    RANY(obj)->line = rb_sourceline();
#endif

#if RGENGC_PROFILE
    if (flags & FL_WB_PROTECTED) {
        objspace->profile.generated_normal_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
    else {
        objspace->profile.generated_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
    }
#endif

    rgengc_report(5, objspace, "newobj: %p (%s)\n", (void *)obj, obj_type_name(obj));

#if USE_RGENGC && RGENGC_CHECK_MODE
    if (RVALUE_PROMOTED(obj)) rb_bug("newobj: %p (%s) is promoted.\n", (void *)obj, obj_type_name(obj));
    if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %p (%s) is remembered.\n", (void *)obj, obj_type_name(obj));
#endif

    objspace->total_allocated_object_num++;
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_NEWOBJ, obj);

    return obj;
}

VALUE
rb_newobj(void)
{
    return newobj_of(0, T_NONE, 0, 0, 0);
}

VALUE
rb_newobj_of(VALUE klass, VALUE flags)
{
    return newobj_of(klass, flags, 0, 0, 0);
}

NODE*
rb_node_newnode(enum node_type type, VALUE a0, VALUE a1, VALUE a2)
{
    NODE *n = (NODE *)newobj_of(0, T_NODE, a0, a1, a2);
    nd_set_type(n, type);
    return n;
}

VALUE
rb_data_object_alloc(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    if (klass) Check_Type(klass, T_CLASS);
    return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap);
}

VALUE
rb_data_typed_object_alloc(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (klass) Check_Type(klass, T_CLASS);
    return newobj_of(klass, T_DATA | type->flags, (VALUE)type, (VALUE)1, (VALUE)datap);
}

size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    if (RTYPEDDATA_P(obj) && RTYPEDDATA_TYPE(obj)->function.dsize) {
        return RTYPEDDATA_TYPE(obj)->function.dsize(RTYPEDDATA_DATA(obj));
    }
    else {
        return 0;
    }
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static inline int
is_pointer_to_heap(rb_objspace_t *objspace, void *ptr)
{
    register RVALUE *p = RANY(ptr);
    register struct heaps_header *heap;
    register size_t hi, lo, mid;

    if (p < lomem || p > himem) return FALSE;
    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;

    /* check whether p looks like a pointer into a heap slot, using binary search */
    lo = 0;
    hi = heaps_used;
    while (lo < hi) {
        mid = (lo + hi) / 2;
        heap = objspace->heap.sorted[mid];
        if (heap->start <= p) {
            if (p < heap->end)
                return TRUE;
            lo = mid + 1;
        }
        else {
            hi = mid;
        }
    }
    return FALSE;
}

static int
free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
{
    if (!me->mark) {
        rb_free_method_entry(me);
    }
    return ST_CONTINUE;
}

void
rb_free_m_table(st_table *tbl)
{
    st_foreach(tbl, free_method_entry_i, 0);
    st_free_table(tbl);
}

static int
free_const_entry_i(ID key, rb_const_entry_t *ce, st_data_t data)
{
    xfree(ce);
    return ST_CONTINUE;
}

void
rb_free_const_table(st_table *tbl)
{
    st_foreach(tbl, free_const_entry_i, 0);
    st_free_table(tbl);
}

static int obj_free(rb_objspace_t *, VALUE);

static inline struct heaps_slot *
add_slot_local_freelist(rb_objspace_t *objspace, RVALUE *p)
{
    struct heaps_slot *slot;

    (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
    p->as.free.flags = 0;
    slot = GET_HEAP_SLOT(p);
    p->as.free.next = slot->freelist;
    slot->freelist = p;
    rgengc_report(3, objspace, "add_slot_local_freelist: %p (%s) is added to freelist\n", p, obj_type_name((VALUE)p));

    return slot;
}

static void
unlink_heap_slot(rb_objspace_t *objspace, struct heaps_slot *slot)
{
    if (slot->prev)
        slot->prev->next = slot->next;
    if (slot->next)
        slot->next->prev = slot->prev;
    if (heaps == slot)
        heaps = slot->next;
    if (objspace->heap.sweep_slots == slot)
        objspace->heap.sweep_slots = slot->next;
    slot->prev = NULL;
    slot->next = NULL;
}

static void
free_unused_heaps(rb_objspace_t *objspace)
{
    size_t i, j;
    struct heaps_header *last = 0;

    for (i = j = 1; j < heaps_used; i++) {
        if (objspace->heap.sorted[i]->limit == 0) {
            if (!last) {
                last = objspace->heap.sorted[i];
            }
            else {
                aligned_free(objspace->heap.sorted[i]);
            }
            heaps_used--;
        }
        else {
            if (i != j) {
                objspace->heap.sorted[j] = objspace->heap.sorted[i];
            }
            j++;
        }
    }
    if (last) {
        if (last < heaps_freed) {
            aligned_free(heaps_freed);
            heaps_freed = last;
        }
        else {
            aligned_free(last);
        }
    }
}
static inline void
make_deferred(RVALUE *p)
{
    p->as.basic.flags = T_ZOMBIE;
}

static inline void
make_io_deferred(RVALUE *p)
{
    rb_io_t *fptr = p->as.file.fptr;
    make_deferred(p);
    p->as.data.dfree = (void (*)(void*))rb_io_fptr_finalize;
    p->as.data.data = fptr;
}

static int
obj_free(rb_objspace_t *objspace, VALUE obj)
{
    gc_event_hook(objspace, RUBY_INTERNAL_EVENT_FREEOBJ, obj);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar((VALUE)obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

#if USE_RGENGC
    if (MARKED_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj),obj))
        CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(obj),obj);
#endif

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (!(RANY(obj)->as.basic.flags & ROBJECT_EMBED) &&
            RANY(obj)->as.object.as.heap.ivptr) {
            xfree(RANY(obj)->as.object.as.heap.ivptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_clear_cache_by_class((VALUE)obj);
        if (RCLASS_M_TBL(obj)) {
            rb_free_m_table(RCLASS_M_TBL(obj));
        }
        if (RCLASS_IV_TBL(obj)) {
            st_free_table(RCLASS_IV_TBL(obj));
        }
        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_IV_INDEX_TBL(obj)) {
            st_free_table(RCLASS_IV_INDEX_TBL(obj));
        }
        xfree(RANY(obj)->as.klass.ptr);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
        if (RANY(obj)->as.hash.ntbl) {
            st_free_table(RANY(obj)->as.hash.ntbl);
        }
        break;
      case T_REGEXP:
        if (RANY(obj)->as.regexp.ptr) {
            onig_free(RANY(obj)->as.regexp.ptr);
        }
        break;
      case T_DATA:
        if (DATA_PTR(obj)) {
            if (RTYPEDDATA_P(obj)) {
                RDATA(obj)->dfree = RANY(obj)->as.typeddata.type->function.dfree;
            }
            if (RANY(obj)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
                xfree(DATA_PTR(obj));
            }
            else if (RANY(obj)->as.data.dfree) {
                make_deferred(RANY(obj));
                return 1;
            }
        }
        break;
      case T_MATCH:
        if (RANY(obj)->as.match.rmatch) {
            struct rmatch *rm = RANY(obj)->as.match.rmatch;
            onig_region_free(&rm->regs, 0);
            if (rm->char_offset)
                xfree(rm->char_offset);
            xfree(rm);
        }
        break;
      case T_FILE:
        if (RANY(obj)->as.file.fptr) {
            make_io_deferred(RANY(obj));
            return 1;
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_ICLASS:
        /* iClass shares table with the module */
        xfree(RANY(obj)->as.klass.ptr);
        break;

      case T_FLOAT:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & RBIGNUM_EMBED_FLAG) && RBIGNUM_DIGITS(obj)) {
            xfree(RBIGNUM_DIGITS(obj));
        }
        break;
      case T_NODE:
        switch (nd_type(obj)) {
          case NODE_SCOPE:
            if (RANY(obj)->as.node.u1.tbl) {
                xfree(RANY(obj)->as.node.u1.tbl);
            }
            break;
          case NODE_ARGS:
            if (RANY(obj)->as.node.u3.args) {
                xfree(RANY(obj)->as.node.u3.args);
            }
            break;
          case NODE_ALLOCA:
            xfree(RANY(obj)->as.node.u1.node);
            break;
        }
        break;                  /* no need to free iv_tbl */

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RANY(obj)->as.rstruct.as.heap.ptr) {
            xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
        }
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    return 0;
}

void
Init_heap(void)
{
    init_heap(&rb_objspace);
}

typedef int each_obj_callback(void *, void *, size_t, void *);

struct each_obj_args {
    each_obj_callback *callback;
    void *data;
};

static VALUE
objspace_each_objects(VALUE arg)
{
    size_t i;
    RVALUE *membase = 0;
    RVALUE *pstart, *pend;
    rb_objspace_t *objspace = &rb_objspace;
    struct each_obj_args *args = (struct each_obj_args *)arg;
    volatile VALUE v;

    i = 0;
    while (i < heaps_used) {
        while (0 < i && (uintptr_t)membase < (uintptr_t)objspace->heap.sorted[i-1])
            i--;
        while (i < heaps_used && (uintptr_t)objspace->heap.sorted[i] <= (uintptr_t)membase)
            i++;
        if (heaps_used <= i)
          break;
        membase = (RVALUE *)objspace->heap.sorted[i];

        pstart = objspace->heap.sorted[i]->start;
        pend = pstart + objspace->heap.sorted[i]->limit;

        for (; pstart != pend; pstart++) {
            if (pstart->as.basic.flags) {
                v = (VALUE)pstart; /* acquire to save this object */
                break;
            }
        }
        if (pstart != pend) {
            if ((*args->callback)(pstart, pend, sizeof(RVALUE), args->data)) {
                break;
            }
        }
    }
    RB_GC_GUARD(v);

    return Qnil;
}

/*
 * rb_objspace_each_objects() is a special C API for walking through the
 * Ruby object space.  It is difficult to use correctly; you should avoid
 * it unless you have read the source of this function and understand
 * exactly what it does.
 *
 * 'callback' is invoked repeatedly (once per heap slot in the current
 * implementation) with:
 *   vstart: a pointer to the first living object of the heap slot.
 *   vend:   a pointer just past the end of the valid heap slot area.
 *   stride: the distance to the next VALUE.
 *
 * If callback() returns non-zero, the iteration stops.
 *
 * A sample callback that iterates over live objects:
 *
 *   int
 *   sample_callback(void *vstart, void *vend, int stride, void *data) {
 *     VALUE v = (VALUE)vstart;
 *     for (; v != (VALUE)vend; v += stride) {
 *       if (RBASIC(v)->flags) { // liveness check
 *         // do something with live object 'v'
 *       }
 *     }
 *     return 0; // continue the iteration
 *   }
 *
 * Note: 'vstart' is not the top of the heap slot.  It points at the
 *       first living object, so that at least one object is held and GC
 *       issues are avoided.  Consequently, you cannot walk through every
 *       object slot, freed slots included.
 *
 * Note: In this implementation 'stride' is the same as sizeof(RVALUE),
 *       but a different stride may be passed for some reason in the
 *       future.  Always use the given stride instead of a hard-coded
 *       constant in the iteration.
 */
void
rb_objspace_each_objects(each_obj_callback *callback, void *data)
{
    struct each_obj_args args;
    rb_objspace_t *objspace = &rb_objspace;

    rest_sweep(objspace);
    objspace->flags.dont_lazy_sweep = TRUE;

    args.callback = callback;
    args.data = data;
    rb_ensure(objspace_each_objects, (VALUE)&args, lazy_sweep_enable, Qnil);
}
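
/*
 * A self-contained usage sketch (illustrative only, not part of gc.c):
 * counting live String objects with rb_objspace_each_objects().  The
 * callback follows the contract documented above: honor the given
 * 'stride' and return 0 to continue the iteration.
 */
#if 0
struct string_count { size_t n; };

static int
count_strings_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct string_count *c = (struct string_count *)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (RBASIC(v)->flags && BUILTIN_TYPE(v) == T_STRING) { /* live String? */
            c->n++;
        }
    }
    return 0; /* continue */
}

static size_t
count_live_strings(void)
{
    struct string_count c = { 0 };
    rb_objspace_each_objects(count_strings_i, &c);
    return c.n;
}
#endif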

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
internal_object_p(VALUE obj)
{
    RVALUE *p = (RVALUE *)obj;

    if (p->as.basic.flags) {
        switch (BUILTIN_TYPE(p)) {
          case T_NONE:
          case T_ICLASS:
          case T_NODE:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (FL_TEST(p, FL_SINGLETON))
              break;
          default:
            if (!p->as.basic.klass) break;
            return 0;
        }
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;
    RVALUE *p = (RVALUE *)vstart, *pend = (RVALUE *)vend;

    for (; p != pend; p++) {
        volatile VALUE v = (VALUE)p;
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                rb_yield(v);
                oes->num++;
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> fixnum
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, <code>each_object</code>
 *  returns both the numbers we defined and several constants defined in
 *  the <code>Math</code> module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *     a = 102.7
 *     b = 95       # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     12345678987654321
 *     102.7
 *     2.71828182845905
 *     3.14159265358979
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
 *     Total count: 7
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    if (argc == 0) {
        of = 0;
    }
    else {
        rb_scan_args(argc, argv, "01", &of);
    }
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_final(obj);
}

VALUE
rb_undefine_final(VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    st_data_t data = obj;
    rb_check_frozen(obj);
    st_delete(finalizer_table, &data, 0);
    FL_UNSET(obj, FL_FINALIZE);
    return obj;
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  has been destroyed.
 *
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    rb_check_frozen(obj);
    if (argc == 1) {
        block = rb_block_proc();
    }
    else if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }

    return define_final0(obj, block);
}

static VALUE
define_final0(VALUE obj, VALUE block)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    RBASIC(obj)->flags |= FL_FINALIZE;

    block = rb_ary_new3(2, INT2FIX(rb_safe_level()), block);
    OBJ_FREEZE(block);

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        rb_ary_push(table, block);
    }
    else {
        table = rb_ary_new3(1, block);
        RBASIC_CLEAR_CLASS(table);
        st_add_direct(finalizer_table, obj, table);
    }
    return block;
}

VALUE
rb_define_final(VALUE obj, VALUE block)
{
    rb_check_frozen(obj);
    if (!rb_respond_to(block, rb_intern("call"))) {
        rb_raise(rb_eArgError, "wrong type argument %s (should be callable)",
                 rb_obj_classname(block));
    }
    return define_final0(obj, block);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_objspace_t *objspace = &rb_objspace;
    VALUE table;
    st_data_t data;

    if (!FL_TEST(obj, FL_FINALIZE)) return;
    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        st_insert(finalizer_table, dest, table);
    }
    FL_SET(dest, FL_FINALIZE);
}

static VALUE
run_single_final(VALUE arg)
{
    VALUE *args = (VALUE *)arg;
    rb_eval_cmd(args[0], args[1], (int)args[2]);
    return Qnil;
}

static void
run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
{
    long i;
    int status;
    VALUE args[3];
    VALUE objid = nonspecial_obj_id(obj);

    if (RARRAY_LEN(table) > 0) {
        args[1] = rb_obj_freeze(rb_ary_new3(1, objid));
    }
    else {
        args[1] = 0;
    }

    args[2] = (VALUE)rb_safe_level();
    for (i=0; i<RARRAY_LEN(table); i++) {
        VALUE final = RARRAY_AREF(table, i);
        args[0] = RARRAY_AREF(final, 1);
        args[2] = FIX2INT(RARRAY_AREF(final, 0));
        status = 0;
        rb_protect(run_single_final, (VALUE)args, &status);
        if (status)
            rb_set_errinfo(Qnil);
    }
}

static void
run_final(rb_objspace_t *objspace, VALUE obj)
{
    RUBY_DATA_FUNC free_func = 0;
    st_data_t key, table;

    objspace->heap.final_num--;

    RBASIC_CLEAR_CLASS(obj);

    if (RTYPEDDATA_P(obj)) {
        free_func = RTYPEDDATA_TYPE(obj)->function.dfree;
    }
    else {
        free_func = RDATA(obj)->dfree;
    }
    if (free_func) {
        (*free_func)(DATA_PTR(obj));
    }

    key = (st_data_t)obj;
    if (st_delete(finalizer_table, &key, &table)) {
        run_finalizer(objspace, obj, (VALUE)table);
    }
}

static void
finalize_list(rb_objspace_t *objspace, RVALUE *p)
{
    while (p) {
        RVALUE *tmp = p->as.free.next;
        run_final(objspace, (VALUE)p);
        objspace->total_freed_object_num++;
        if (!FL_TEST(p, FL_SINGLETON)) { /* not freeing page */
            add_slot_local_freelist(objspace, p);
            objspace->heap.free_num++;
        }
        else {
            struct heaps_slot *slot = (struct heaps_slot *)(VALUE)RDATA(p)->dmark;
            slot->header->limit--;
        }
        p = tmp;
    }
}

static void
finalize_deferred(rb_objspace_t *objspace)
{
    RVALUE *p = deferred_final_list;
    deferred_final_list = 0;

    if (p) {
        finalize_list(objspace, p);
    }
}

static void
gc_finalize_deferred(void *dmy)
{
    rb_objspace_t *objspace = &rb_objspace;
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
    finalize_deferred(objspace);
    ATOMIC_SET(finalizing, 0);
}

/* TODO: kept for compatibility; possibly unused. */
void
rb_gc_finalize_deferred(void)
{
    gc_finalize_deferred(0);
}

static void
gc_finalize_deferred_register(void)
1775 {
1776     rb_postponed_job_register_one(0, gc_finalize_deferred, 0);
1777 }
1778 
1779 struct force_finalize_list {
1780     VALUE obj;
1781     VALUE table;
1782     struct force_finalize_list *next;
1783 };
1784 
1785 static int
1786 force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
1787 {
1788     struct force_finalize_list **prev = (struct force_finalize_list **)arg;
1789     struct force_finalize_list *curr = ALLOC(struct force_finalize_list);
1790     curr->obj = key;
1791     curr->table = val;
1792     curr->next = *prev;
1793     *prev = curr;
1794     return ST_CONTINUE;
1795 }
1796 
1797 void
1798 rb_gc_call_finalizer_at_exit(void)
1799 {
1800     rb_objspace_call_finalizer(&rb_objspace);
1801 }
1802 
1803 static void
1804 rb_objspace_call_finalizer(rb_objspace_t *objspace)
1805 {
1806     RVALUE *p, *pend;
1807     RVALUE *final_list = 0;
1808     size_t i;
1809 
1810     rest_sweep(objspace);
1811 
1812     if (ATOMIC_EXCHANGE(finalizing, 1)) return;
1813 
1814     /* run finalizers */
1815     finalize_deferred(objspace);
1816     assert(deferred_final_list == 0);
1817 
1818     /* force remaining finalizers to run */
1819     while (finalizer_table->num_entries) {
1820         struct force_finalize_list *list = 0;
1821         st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
1822         while (list) {
1823             struct force_finalize_list *curr = list;
1824             st_data_t obj = (st_data_t)curr->obj;
1825             run_finalizer(objspace, curr->obj, curr->table);
1826             st_delete(finalizer_table, &obj, 0);
1827             list = curr->next;
1828             xfree(curr);
1829         }
1830     }
1831 
1832     /* finalizers are part of garbage collection */
1833     during_gc++;
1834 
1835     /* run data objects' finalizers */
1836     for (i = 0; i < heaps_used; i++) {
1837         p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
1838         while (p < pend) {
1839             if (BUILTIN_TYPE(p) == T_DATA &&
1840                 DATA_PTR(p) && RANY(p)->as.data.dfree &&
1841                 !rb_obj_is_thread((VALUE)p) &&
1842                 !rb_obj_is_mutex((VALUE)p) &&
1843                 !rb_obj_is_fiber((VALUE)p)) {
1844                 p->as.free.flags = 0;
1845                 if (RTYPEDDATA_P(p)) {
1846                     RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
1847                 }
1848                 if (RANY(p)->as.data.dfree == (RUBY_DATA_FUNC)-1) {
1849                     xfree(DATA_PTR(p));
1850                 }
1851                 else if (RANY(p)->as.data.dfree) {
1852                     make_deferred(RANY(p));
1853                     RANY(p)->as.free.next = final_list;
1854                     final_list = p;
1855                 }
1856             }
1857             else if (BUILTIN_TYPE(p) == T_FILE) {
1858                 if (RANY(p)->as.file.fptr) {
1859                     make_io_deferred(RANY(p));
1860                     RANY(p)->as.free.next = final_list;
1861                     final_list = p;
1862                 }
1863             }
1864             p++;
1865         }
1866     }
1867     during_gc = 0;
1868     if (final_list) {
1869         finalize_list(objspace, final_list);
1870     }
1871 
1872     st_free_table(finalizer_table);
1873     finalizer_table = 0;
1874     ATOMIC_SET(finalizing, 0);
1875 }
1876 
1877 static inline int
1878 is_id_value(rb_objspace_t *objspace, VALUE ptr)
1879 {
1880     if (!is_pointer_to_heap(objspace, (void *)ptr)) return FALSE;
1881     if (BUILTIN_TYPE(ptr) > T_FIXNUM) return FALSE;
1882     if (BUILTIN_TYPE(ptr) == T_ICLASS) return FALSE;
1883     return TRUE;
1884 }
1885 
1886 static inline int
1887 is_swept_object(rb_objspace_t *objspace, VALUE ptr)
1888 {
1889     struct heaps_slot *slot = objspace->heap.sweep_slots;
1890 
1891     while (slot) {
1892         if ((VALUE)slot->header->start <= ptr && ptr < (VALUE)(slot->header->end))
1893             return FALSE;
1894         slot = slot->next;
1895     }
1896     return TRUE;
1897 }
1898 
1899 static inline int
1900 is_dead_object(rb_objspace_t *objspace, VALUE ptr)
1901 {
1902     if (!is_lazy_sweeping(objspace) || MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr))
1903         return FALSE;
1904     if (!is_swept_object(objspace, ptr))
1905         return TRUE;
1906     return FALSE;
1907 }
1908 
1909 static inline int
1910 is_live_object(rb_objspace_t *objspace, VALUE ptr)
1911 {
1912     if (BUILTIN_TYPE(ptr) == 0) return FALSE;
1913     if (RBASIC(ptr)->klass == 0) return FALSE;
1914     if (is_dead_object(objspace, ptr)) return FALSE;
1915     return TRUE;
1916 }
1917 
1918 /*
1919  *  call-seq:
1920  *     ObjectSpace._id2ref(object_id) -> an_object
1921  *
1922  *  Converts an object id to a reference to the object. May not be
1923  *  called on an object id passed as a parameter to a finalizer.
1924  *
1925  *     s = "I am a string"                    #=> "I am a string"
1926  *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
1927  *     r == s                                 #=> true
1928  *
1929  */
1930 
1931 static VALUE
1932 id2ref(VALUE obj, VALUE objid)
1933 {
1934 #if SIZEOF_LONG == SIZEOF_VOIDP
1935 #define NUM2PTR(x) NUM2ULONG(x)
1936 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1937 #define NUM2PTR(x) NUM2ULL(x)
1938 #endif
1939     rb_objspace_t *objspace = &rb_objspace;
1940     VALUE ptr;
1941     void *p0;
1942 
1943     ptr = NUM2PTR(objid);
1944     p0 = (void *)ptr;
1945 
1946     if (ptr == Qtrue) return Qtrue;
1947     if (ptr == Qfalse) return Qfalse;
1948     if (ptr == Qnil) return Qnil;
1949     if (FIXNUM_P(ptr)) return (VALUE)ptr;
1950     if (FLONUM_P(ptr)) return (VALUE)ptr;
1951     ptr = obj_id_to_ref(objid);
1952 
1953     if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
1954         ID symid = ptr / sizeof(RVALUE);
1955         if (rb_id2name(symid) == 0)
1956             rb_raise(rb_eRangeError, "%p is not symbol id value", p0);
1957         return ID2SYM(symid);
1958     }
1959 
1960     if (!is_id_value(objspace, ptr)) {
1961         rb_raise(rb_eRangeError, "%p is not id value", p0);
1962     }
1963     if (!is_live_object(objspace, ptr)) {
1964         rb_raise(rb_eRangeError, "%p is recycled object", p0);
1965     }
1966     return (VALUE)ptr;
1967 }
1968 
1969 /*
1970  *  Document-method: __id__
1971  *  Document-method: object_id
1972  *
1973  *  call-seq:
1974  *     obj.__id__       -> integer
1975  *     obj.object_id    -> integer
1976  *
1977  *  Returns an integer identifier for +obj+.
1978  *
1979  *  The same number will be returned on all calls to +object_id+ for a given object,
1980  *  and no two active objects will share an id.
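      *
      *  For example:
      *
      *     obj = Object.new
      *     obj.object_id == obj.object_id          #=> true
      *     Object.new.object_id == obj.object_id   #=> false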
1981  *
1982  *  Object#object_id is a different concept from the +:name+ notation, which
1983  *  returns the symbol id of +name+.
1984  *
1985  *  Replaces the deprecated Object#id.
1986  */
1987 
1988 /*
1989  *  call-seq:
1990  *     obj.hash    -> fixnum
1991  *
1992  *  Generates a Fixnum hash value for this object.
1993  *
1994  *  This function must have the property that <code>a.eql?(b)</code> implies
1995  *  <code>a.hash == b.hash</code>.
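      *
      *  For example:
      *
      *     "ruby".eql?("ruby")          #=> true
      *     "ruby".hash == "ruby".hash   #=> true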
1996  *
1997  *  The hash value is used by the Hash class.
1998  *
1999  *  Any hash value that exceeds the capacity of a Fixnum will be truncated
2000  *  before being used.
2001  */
2002 
2003 VALUE
2004 rb_obj_id(VALUE obj)
2005 {
2006     /*
2007      *                32-bit VALUE space
2008      *          MSB ------------------------ LSB
2009      *  false   00000000000000000000000000000000
2010      *  true    00000000000000000000000000000010
2011      *  nil     00000000000000000000000000000100
2012      *  undef   00000000000000000000000000000110
2013      *  symbol  ssssssssssssssssssssssss00001110
2014      *  object  oooooooooooooooooooooooooooooo00        = 0 (mod sizeof(RVALUE))
2015      *  fixnum  fffffffffffffffffffffffffffffff1
2016      *
2017      *                    object_id space
2018      *                                       LSB
2019      *  false   00000000000000000000000000000000
2020      *  true    00000000000000000000000000000010
2021      *  nil     00000000000000000000000000000100
2022      *  undef   00000000000000000000000000000110
2023      *  symbol   000SSSSSSSSSSSSSSSSSSSSSSSSSSS0        S...S % A = 4 (S...S = s...s * A + 4)
2024      *  object   oooooooooooooooooooooooooooooo0        o...o % A = 0
2025      *  fixnum  fffffffffffffffffffffffffffffff1        bignum if required
2026      *
2027      *  where A = sizeof(RVALUE)/4
2028      *
2029      *  sizeof(RVALUE) is
2030      *  20 if 32-bit, double is 4-byte aligned
2031      *  24 if 32-bit, double is 8-byte aligned
2032      *  40 if 64-bit
2033      */
2034     if (SYMBOL_P(obj)) {
2035         return (SYM2ID(obj) * sizeof(RVALUE) + (4 << 2)) | FIXNUM_FLAG;
2036     }
2037     else if (FLONUM_P(obj)) {
2038 #if SIZEOF_LONG == SIZEOF_VOIDP
2039         return LONG2NUM((SIGNED_VALUE)obj);
2040 #else
2041         return LL2NUM((SIGNED_VALUE)obj);
2042 #endif
2043     }
2044     else if (SPECIAL_CONST_P(obj)) {
2045         return LONG2NUM((SIGNED_VALUE)obj);
2046     }
2047     return nonspecial_obj_id(obj);
2048 }
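
     /* Worked example (illustrative; assumes a 64-bit build, so
      * sizeof(RVALUE) == 40, and that obj_id_to_ref() simply clears
      * FIXNUM_FLAG, as its use in id2ref() above implies): for a Symbol
      * with ID 3, the VALUE returned here is (3*40 + (4<<2)) | FIXNUM_FLAG
      * == 137, which is the Fixnum 68. id2ref() reverses this:
      * 137 ^ FIXNUM_FLAG == 136, 136 % 40 == 16 == (4<<2) identifies it
      * as a symbol id, and 136 / 40 recovers ID 3. */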
2049 
2050 static int
2051 set_zero(st_data_t key, st_data_t val, st_data_t arg)
2052 {
2053     VALUE k = (VALUE)key;
2054     VALUE hash = (VALUE)arg;
2055     rb_hash_aset(hash, k, INT2FIX(0));
2056     return ST_CONTINUE;
2057 }
2058 
2059 /*
2060  *  call-seq:
2061  *     ObjectSpace.count_objects([result_hash]) -> hash
2062  *
2063  *  Counts objects for each type.
2064  *
2065  *  It returns a hash, such as:
2066  *      {
2067  *        :TOTAL=>10000,
2068  *        :FREE=>3011,
2069  *        :T_OBJECT=>6,
2070  *        :T_CLASS=>404,
2071  *        # ...
2072  *      }
2073  *
2074  *  The contents of the returned hash are implementation specific.
2075  *  They may change in the future.
2076  *
2077  *  If the optional argument +result_hash+ is given,
2078  *  it is overwritten and returned. This is intended to avoid the probe effect.
2079  *
2080  *  This method is only expected to work on C Ruby.
2081  *
2082  */
2083 
2084 static VALUE
2085 count_objects(int argc, VALUE *argv, VALUE os)
2086 {
2087     rb_objspace_t *objspace = &rb_objspace;
2088     size_t counts[T_MASK+1];
2089     size_t freed = 0;
2090     size_t total = 0;
2091     size_t i;
2092     VALUE hash;
2093 
2094     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
2095         if (!RB_TYPE_P(hash, T_HASH))
2096             rb_raise(rb_eTypeError, "non-hash given");
2097     }
2098 
2099     for (i = 0; i <= T_MASK; i++) {
2100         counts[i] = 0;
2101     }
2102 
2103     for (i = 0; i < heaps_used; i++) {
2104         RVALUE *p, *pend;
2105 
2106         p = objspace->heap.sorted[i]->start; pend = p + objspace->heap.sorted[i]->limit;
2107         for (;p < pend; p++) {
2108             if (p->as.basic.flags) {
2109                 counts[BUILTIN_TYPE(p)]++;
2110             }
2111             else {
2112                 freed++;
2113             }
2114         }
2115         total += objspace->heap.sorted[i]->limit;
2116     }
2117 
2118     if (hash == Qnil) {
2119         hash = rb_hash_new();
2120     }
2121     else if (!RHASH_EMPTY_P(hash)) {
2122         st_foreach(RHASH_TBL_RAW(hash), set_zero, hash);
2123     }
2124     rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(total));
2125     rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(freed));
2126 
2127     for (i = 0; i <= T_MASK; i++) {
2128         VALUE type;
2129         switch (i) {
2130 #define COUNT_TYPE(t) case (t): type = ID2SYM(rb_intern(#t)); break;
2131             COUNT_TYPE(T_NONE);
2132             COUNT_TYPE(T_OBJECT);
2133             COUNT_TYPE(T_CLASS);
2134             COUNT_TYPE(T_MODULE);
2135             COUNT_TYPE(T_FLOAT);
2136             COUNT_TYPE(T_STRING);
2137             COUNT_TYPE(T_REGEXP);
2138             COUNT_TYPE(T_ARRAY);
2139             COUNT_TYPE(T_HASH);
2140             COUNT_TYPE(T_STRUCT);
2141             COUNT_TYPE(T_BIGNUM);
2142             COUNT_TYPE(T_FILE);
2143             COUNT_TYPE(T_DATA);
2144             COUNT_TYPE(T_MATCH);
2145             COUNT_TYPE(T_COMPLEX);
2146             COUNT_TYPE(T_RATIONAL);
2147             COUNT_TYPE(T_NIL);
2148             COUNT_TYPE(T_TRUE);
2149             COUNT_TYPE(T_FALSE);
2150             COUNT_TYPE(T_SYMBOL);
2151             COUNT_TYPE(T_FIXNUM);
2152             COUNT_TYPE(T_UNDEF);
2153             COUNT_TYPE(T_NODE);
2154             COUNT_TYPE(T_ICLASS);
2155             COUNT_TYPE(T_ZOMBIE);
2156 #undef COUNT_TYPE
2157           default:              type = INT2NUM(i); break;
2158         }
2159         if (counts[i])
2160             rb_hash_aset(hash, type, SIZET2NUM(counts[i]));
2161     }
2162 
2163     return hash;
2164 }
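
     /* Example (Ruby level), reusing one result hash across calls to
      * avoid allocating during measurement (the probe effect noted in the
      * documentation above):
      *
      *     h = {}
      *     ObjectSpace.count_objects(h)  # h is zeroed, refilled, returned
      *     h[:TOTAL] - h[:FREE]          # occupied (non-free) slots
      */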
2165 
2166 /*
2167   ------------------------ Garbage Collection ------------------------
2168 */
2169 
2170 /* Sweeping */
2171 
2172 static VALUE
2173 lazy_sweep_enable(void)
2174 {
2175     rb_objspace_t *objspace = &rb_objspace;
2176 
2177     objspace->flags.dont_lazy_sweep = FALSE;
2178     return Qnil;
2179 }
2180 
2181 static void
2182 gc_setup_mark_bits(struct heaps_slot *slot)
2183 {
2184 #if USE_RGENGC
2185     /* copy oldgen bitmap to mark bitmap */
2186     memcpy(&slot->mark_bits[0], &slot->oldgen_bits[0], HEAP_BITMAP_SIZE);
2187 #else
2188     /* clear mark bitmap */
2189     memset(&slot->mark_bits[0], 0, HEAP_BITMAP_SIZE);
2190 #endif
2191 }
2192 
2193 static size_t
2194 objspace_live_num(rb_objspace_t *objspace)
2195 {
2196     return objspace->total_allocated_object_num - objspace->total_freed_object_num;
2197 }
2198 
2199 static inline void
2200 slot_sweep(rb_objspace_t *objspace, struct heaps_slot *sweep_slot)
2201 {
2202     int i;
2203     size_t empty_num = 0, freed_num = 0, final_num = 0;
2204     RVALUE *p, *pend, *offset;
2205     RVALUE *final = deferred_final_list;
2206     int deferred;
2207     bits_t *bits, bitset;
2208 
2209     rgengc_report(1, objspace, "slot_sweep: start.\n");
2210 
2211     p = sweep_slot->header->start; pend = p + sweep_slot->header->limit;
2212     offset = p - NUM_IN_SLOT(p);
2213     bits = GET_HEAP_MARK_BITS(p);
2214 
2215     /* create guard: set out-of-range bits to 1 */
2216     bits[BITMAP_INDEX(p)] |= BITMAP_BIT(p)-1;
2217     bits[BITMAP_INDEX(pend)] |= ~(BITMAP_BIT(pend) - 1);
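         /* Illustration: with 8-bit words, if the first object's bit index
          * is 3, BITMAP_BIT(p)-1 == 0b00000111 pre-marks bits 0..2, so the
          * ~bits scan below never treats positions outside [p, pend) as
          * sweepable. */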
2218 
2219     for (i=0; i < HEAP_BITMAP_LIMIT; i++) {
2220         bitset = ~bits[i];
2221         if (bitset) {
2222             p = offset + i * BITS_BITLENGTH;
2223             do {
2224                 if ((bitset & 1) && BUILTIN_TYPE(p) != T_ZOMBIE) {
2225                     if (p->as.basic.flags) {
2226                         rgengc_report(3, objspace, "slot_sweep: free %p (%s)\n", p, obj_type_name((VALUE)p));
2227 #if USE_RGENGC && RGENGC_CHECK_MODE
2228                         if (objspace->rgengc.during_minor_gc && RVALUE_PROMOTED(p)) rb_bug("slot_sweep: %p (%s) is promoted.\n", p, obj_type_name((VALUE)p));
2229                         if (rgengc_remembered(objspace, (VALUE)p)) rb_bug("slot_sweep: %p (%s) is remembered.\n", p, obj_type_name((VALUE)p));
2230 #endif
2231                         if ((deferred = obj_free(objspace, (VALUE)p)) || (FL_TEST(p, FL_FINALIZE))) {
2232                             if (!deferred) {
2233                                 p->as.free.flags = T_ZOMBIE;
2234                                 RDATA(p)->dfree = 0;
2235                             }
2236                             p->as.free.next = deferred_final_list;
2237                             deferred_final_list = p;
2238                             assert(BUILTIN_TYPE(p) == T_ZOMBIE);
2239                             final_num++;
2240                         }
2241                         else {
2242                             (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
2243                             p->as.free.flags = 0;
2244                             p->as.free.next = sweep_slot->freelist;
2245                             sweep_slot->freelist = p;
2246                             rgengc_report(3, objspace, "slot_sweep: %p (%s) is added to freelist\n", p, obj_type_name((VALUE)p));
2247                             freed_num++;
2248                         }
2249                     }
2250                     else {
2251                         empty_num++;
2252                     }
2253                 }
2254                 p++;
2255                 bitset >>= 1;
2256             } while (bitset);
2257         }
2258     }
2259 
2260     gc_setup_mark_bits(sweep_slot);
2261 
2262 #if GC_PROFILE_MORE_DETAIL
2263     if (objspace->profile.run) {
2264         gc_profile_record *record = gc_prof_record(objspace);
2265         record->removing_objects += final_num + freed_num;
2266         record->empty_objects += empty_num;
2267     }
2268 #endif
2269 
2270     if (final_num + freed_num + empty_num == sweep_slot->header->limit &&
2271         objspace->heap.free_num > objspace->heap.do_heap_free) {
2272         RVALUE *pp;
2273 
2274         for (pp = deferred_final_list; pp != final; pp = pp->as.free.next) {
2275             RDATA(pp)->dmark = (void (*)(void *))(VALUE)sweep_slot;
2276             pp->as.free.flags |= FL_SINGLETON; /* freeing page mark */
2277         }
2278         sweep_slot->header->limit = final_num;
2279         unlink_heap_slot(objspace, sweep_slot);
2280     }
2281     else {
2282         if (freed_num + empty_num > 0) {
2283             link_free_heap_slot(objspace, sweep_slot);
2284         }
2285         else {
2286             sweep_slot->free_next = NULL;
2287         }
2288         objspace->heap.free_num += freed_num + empty_num;
2289     }
2290     objspace->total_freed_object_num += freed_num;
2291     objspace->heap.final_num += final_num;
2292 
2293     if (deferred_final_list && !finalizing) {
2294         rb_thread_t *th = GET_THREAD();
2295         if (th) {
2296             gc_finalize_deferred_register();
2297         }
2298     }
2299 
2300     rgengc_report(1, objspace, "slot_sweep: end.\n");
2301 }
2302 
2303 static int
2304 ready_to_gc(rb_objspace_t *objspace)
2305 {
2306     if (dont_gc || during_gc) {
2307         if (!has_free_object) {
2308             if (!heaps_increment(objspace)) {
2309                 set_heaps_increment(objspace);
2310                 heaps_increment(objspace);
2311             }
2312         }
2313         return FALSE;
2314     }
2315     return TRUE;
2316 }
2317 
2318 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
2319 __attribute__((noinline))
2320 #endif
2321 static void
2322 before_gc_sweep(rb_objspace_t *objspace)
2323 {
2324     rgengc_report(1, objspace, "before_gc_sweep\n");
2325 
2326     objspace->heap.do_heap_free = (size_t)((heaps_used * HEAP_OBJ_LIMIT) * 0.65);
2327     objspace->heap.free_min = (size_t)((heaps_used * HEAP_OBJ_LIMIT)  * 0.2);
2328     if (objspace->heap.free_min < initial_free_min) {
2329         objspace->heap.free_min = initial_free_min;
2330         if (objspace->heap.do_heap_free < initial_free_min)
2331             objspace->heap.do_heap_free = initial_free_min;
2332     }
2333     objspace->heap.sweep_slots = heaps;
2334     objspace->heap.free_num = 0;
2335     objspace->heap.free_slots = NULL;
2336 
2337     malloc_increase2 += ATOMIC_SIZE_EXCHANGE(malloc_increase,0);
2338 
2339     /* sweep unlinked method entries */
2340     if (GET_VM()->unlinked_method_entry_list) {
2341         rb_sweep_method_entry(GET_VM());
2342     }
2343 }
2344 
2345 static void
2346 after_gc_sweep(rb_objspace_t *objspace)
2347 {
2348     size_t inc;
2349 
2350     rgengc_report(1, objspace, "after_gc_sweep: objspace->heap.free_num: %d, objspace->heap.free_min: %d\n",
2351                   objspace->heap.free_num, objspace->heap.free_min);
2352 
2353     if (objspace->heap.free_num < objspace->heap.free_min) {
2354         set_heaps_increment(objspace);
2355         heaps_increment(objspace);
2356 
2357 #if USE_RGENGC
2358         if (objspace->rgengc.remembered_shady_object_count + objspace->rgengc.oldgen_object_count > (heaps_length * HEAP_OBJ_LIMIT) / 2) {
2359             /* if [oldgen]+[remembered shady] > [all object count]/2, then do major GC */
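                 /* e.g. with heaps_length * HEAP_OBJ_LIMIT == 10000 slots,
                  * 3000 oldgen + 2100 remembered shady objects (5100 > 5000)
                  * makes the next GC a major one (illustrative numbers) */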
2360             objspace->rgengc.need_major_gc = TRUE;
2361         }
2362 #endif
2363     }
2364 
2365     gc_prof_set_malloc_info(objspace);
2366     gc_prof_set_heap_info(objspace);
2367 
2368     inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
2369     inc += malloc_increase2;
2370     malloc_increase2 = 0;
2371 
2372     if (inc > malloc_limit) {
2373         malloc_limit +=
2374           (size_t)((inc - malloc_limit) * (double)objspace_live_num(objspace) / (heaps_used * HEAP_OBJ_LIMIT));
2375         if (malloc_limit < initial_malloc_limit) malloc_limit = initial_malloc_limit;
2376     }
2377 
2378     free_unused_heaps(objspace);
2379 
2380 
2381     gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_END, 0 /* TODO: pass minor/immediate flag? */);
2382 }
2383 
2384 static int
2385 lazy_sweep(rb_objspace_t *objspace)
2386 {
2387     struct heaps_slot *next;
2388     int result = FALSE;
2389 
2390     gc_prof_sweep_timer_start(objspace);
2391 
2392     heaps_increment(objspace);
2393     while (is_lazy_sweeping(objspace)) {
2394         next = objspace->heap.sweep_slots->next;
2395         slot_sweep(objspace, objspace->heap.sweep_slots);
2396         objspace->heap.sweep_slots = next;
2397 
2398         if (!next) after_gc_sweep(objspace);
2399 
2400         if (has_free_object) {
2401             result = TRUE;
2402             break;
2403         }
2404     }
2405 
2406     gc_prof_sweep_timer_stop(objspace);
2407 
2408     return result;
2409 }
2410 
2411 static void
2412 rest_sweep(rb_objspace_t *objspace)
2413 {
2414     if (is_lazy_sweeping(objspace)) {
2415         during_gc++;
2416         while (is_lazy_sweeping(objspace)) {
2417             lazy_sweep(objspace);
2418         }
2419         during_gc = 0;
2420     }
2421 }
2422 
2423 static void
2424 gc_sweep(rb_objspace_t *objspace, int immediate_sweep)
2425 {
2426     if (immediate_sweep) {
2427         struct heaps_slot *next;
2428         gc_prof_sweep_timer_start(objspace);
2429         before_gc_sweep(objspace);
2430 
2431         while (objspace->heap.sweep_slots) {
2432             next = objspace->heap.sweep_slots->next;
2433             slot_sweep(objspace, objspace->heap.sweep_slots);
2434             objspace->heap.sweep_slots = next;
2435         }
2436 
2437         after_gc_sweep(objspace);
2438         gc_prof_sweep_timer_stop(objspace);
2439     }
2440     else {
2441         before_gc_sweep(objspace);
2442         lazy_sweep(objspace);
2443     }
2444 
2445     if (!has_free_object) {
2446         /* there are no free objects after slot_sweep() */
2447         set_heaps_increment(objspace);
2448         if (!heaps_increment(objspace)) { /* can't allocate additional free objects */
2449             during_gc = 0;
2450             rb_memerror();
2451         }
2452     }
2453 }
2454 
2455 static int
2456 gc_prepare_free_objects(rb_objspace_t *objspace)
2457 {
2458     if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_lazy_sweep) {
2459         if (heaps_increment(objspace)) {
2460             return TRUE;
2461         }
2462         else {
2463             return garbage_collect(objspace, FALSE, TRUE, GPR_FLAG_NEWOBJ);
2464         }
2465     }
2466 
2467     if (!ready_to_gc(objspace)) return TRUE;
2468 
2469     during_gc++;
2470 
2471     if (is_lazy_sweeping(objspace)) {
2472         if (lazy_sweep(objspace)) {
2473             during_gc = 0;
2474             return TRUE;
2475         }
2476     }
2477     else {
2478         if (heaps_increment(objspace)) {
2479             during_gc = 0;
2480             return TRUE;
2481         }
2482     }
2483 
2484 #if GC_PROFILE_MORE_DETAIL
2485     objspace->profile.prepare_time = 0;
2486 #endif
2487     return garbage_collect_body(objspace, 0, 0, GPR_FLAG_NEWOBJ);
2488 }
2489 
2490 /* Marking stack */
2491 
2492 static void push_mark_stack(mark_stack_t *, VALUE);
2493 static int pop_mark_stack(mark_stack_t *, VALUE *);
2494 static void shrink_stack_chunk_cache(mark_stack_t *stack);
2495 
2496 static stack_chunk_t *
2497 stack_chunk_alloc(void)
2498 {
2499     stack_chunk_t *res;
2500 
2501     res = malloc(sizeof(stack_chunk_t));
2502     if (!res)
2503         rb_memerror();
2504 
2505     return res;
2506 }
2507 
2508 static inline int
2509 is_mark_stack_empty(mark_stack_t *stack)
2510 {
2511     return stack->chunk == NULL;
2512 }
2513 
2514 static void
2515 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
2516 {
2517     chunk->next = stack->cache;
2518     stack->cache = chunk;
2519     stack->cache_size++;
2520 }
2521 
2522 static void
2523 shrink_stack_chunk_cache(mark_stack_t *stack)
2524 {
2525     stack_chunk_t *chunk;
2526 
2527     if (stack->unused_cache_size > (stack->cache_size/2)) {
2528         chunk = stack->cache;
2529         stack->cache = stack->cache->next;
2530         stack->cache_size--;
2531         free(chunk);
2532     }
2533     stack->unused_cache_size = stack->cache_size;
2534 }
2535 
2536 static void
2537 push_mark_stack_chunk(mark_stack_t *stack)
2538 {
2539     stack_chunk_t *next;
2540 
2541     assert(stack->index == stack->limit);
2542     if (stack->cache_size > 0) {
2543         next = stack->cache;
2544         stack->cache = stack->cache->next;
2545         stack->cache_size--;
2546         if (stack->unused_cache_size > stack->cache_size)
2547             stack->unused_cache_size = stack->cache_size;
2548     }
2549     else {
2550         next = stack_chunk_alloc();
2551     }
2552     next->next = stack->chunk;
2553     stack->chunk = next;
2554     stack->index = 0;
2555 }
2556 
2557 static void
2558 pop_mark_stack_chunk(mark_stack_t *stack)
2559 {
2560     stack_chunk_t *prev;
2561 
2562     prev = stack->chunk->next;
2563     assert(stack->index == 0);
2564     add_stack_chunk_cache(stack, stack->chunk);
2565     stack->chunk = prev;
2566     stack->index = stack->limit;
2567 }
2568 
2569 #if defined(ENABLE_VM_OBJSPACE) && ENABLE_VM_OBJSPACE
2570 static void
2571 free_stack_chunks(mark_stack_t *stack)
2572 {
2573     stack_chunk_t *chunk = stack->chunk;
2574     stack_chunk_t *next = NULL;
2575 
2576     while (chunk != NULL) {
2577         next = chunk->next;
2578         free(chunk);
2579         chunk = next;
2580     }
2581 }
2582 #endif
2583 
2584 static void
2585 push_mark_stack(mark_stack_t *stack, VALUE data)
2586 {
2587     if (stack->index == stack->limit) {
2588         push_mark_stack_chunk(stack);
2589     }
2590     stack->chunk->data[stack->index++] = data;
2591 }
2592 
2593 static int
2594 pop_mark_stack(mark_stack_t *stack, VALUE *data)
2595 {
2596     if (is_mark_stack_empty(stack)) {
2597         return FALSE;
2598     }
2599     if (stack->index == 1) {
2600         *data = stack->chunk->data[--stack->index];
2601         pop_mark_stack_chunk(stack);
2602         return TRUE;
2603     }
2604     *data = stack->chunk->data[--stack->index];
2605     return TRUE;
2606 }
2607 
2608 static void
2609 init_mark_stack(mark_stack_t *stack)
2610 {
2611     int i;
2612 
2613     push_mark_stack_chunk(stack);
2614     stack->limit = STACK_CHUNK_SIZE;
2615 
2616     for (i=0; i < 4; i++) {
2617         add_stack_chunk_cache(stack, stack_chunk_alloc());
2618     }
2619     stack->unused_cache_size = stack->cache_size;
2620 }
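
     /* How these pieces fit together (descriptive note): the mark stack
      * is a linked list of fixed-size chunks. push_mark_stack() fills the
      * current chunk and calls push_mark_stack_chunk() when it is full;
      * pop_mark_stack() drains it, and pop_mark_stack_chunk() returns the
      * emptied chunk to a small cache, trimmed by shrink_stack_chunk_cache(),
      * so repeated grow/shrink cycles during marking avoid malloc/free
      * churn. */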
2621 
2622 
2623 
2624 #ifdef __ia64
2625 #define SET_STACK_END (SET_MACHINE_STACK_END(&th->machine_stack_end), th->machine_register_stack_end = rb_ia64_bsp())
2626 #else
2627 #define SET_STACK_END SET_MACHINE_STACK_END(&th->machine_stack_end)
2628 #endif
2629 
2630 #define STACK_START (th->machine_stack_start)
2631 #define STACK_END (th->machine_stack_end)
2632 #define STACK_LEVEL_MAX (th->machine_stack_maxsize/sizeof(VALUE))
2633 
2634 #if STACK_GROW_DIRECTION < 0
2635 # define STACK_LENGTH  (size_t)(STACK_START - STACK_END)
2636 #elif STACK_GROW_DIRECTION > 0
2637 # define STACK_LENGTH  (size_t)(STACK_END - STACK_START + 1)
2638 #else
2639 # define STACK_LENGTH  ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2640                         : (size_t)(STACK_END - STACK_START + 1))
2641 #endif
2642 #if !STACK_GROW_DIRECTION
2643 int ruby_stack_grow_direction;
2644 int
2645 ruby_get_stack_grow_direction(volatile VALUE *addr)
2646 {
2647     VALUE *end;
2648     SET_MACHINE_STACK_END(&end);
2649 
2650     if (end > addr) return ruby_stack_grow_direction = 1;
2651     return ruby_stack_grow_direction = -1;
2652 }
2653 #endif
2654 
2655 size_t
2656 ruby_stack_length(VALUE **p)
2657 {
2658     rb_thread_t *th = GET_THREAD();
2659     SET_STACK_END;
2660     if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2661     return STACK_LENGTH;
2662 }
2663 
2664 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2665 static int
2666 stack_check(int water_mark)
2667 {
2668     int ret;
2669     rb_thread_t *th = GET_THREAD();
2670     SET_STACK_END;
2671     ret = STACK_LENGTH > STACK_LEVEL_MAX - water_mark;
2672 #ifdef __ia64
2673     if (!ret) {
2674         ret = (VALUE*)rb_ia64_bsp() - th->machine_register_stack_start >
2675               th->machine_register_stack_maxsize/sizeof(VALUE) - water_mark;
2676     }
2677 #endif
2678     return ret;
2679 }
2680 #endif
2681 
2682 #define STACKFRAME_FOR_CALL_CFUNC 512
2683 
2684 int
2685 ruby_stack_check(void)
2686 {
2687 #if defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK)
2688     return 0;
2689 #else
2690     return stack_check(STACKFRAME_FOR_CALL_CFUNC);
2691 #endif
2692 }
2693 
2694 static void
2695 mark_locations_array(rb_objspace_t *objspace, register VALUE *x, register long n)
2696 {
2697     VALUE v;
2698     while (n--) {
2699         v = *x;
2700         gc_mark_maybe(objspace, v);
2701         x++;
2702     }
2703 }
2704 
2705 static void
2706 gc_mark_locations(rb_objspace_t *objspace, VALUE *start, VALUE *end)
2707 {
2708     long n;
2709 
2710     if (end <= start) return;
2711     n = end - start;
2712     mark_locations_array(objspace, start, n);
2713 }
2714 
2715 void
2716 rb_gc_mark_locations(VALUE *start, VALUE *end)
2717 {
2718     gc_mark_locations(&rb_objspace, start, end);
2719 }
2720 
2721 #define rb_gc_mark_locations(start, end) gc_mark_locations(objspace, (start), (end))
2722 
2723 struct mark_tbl_arg {
2724     rb_objspace_t *objspace;
2725 };
2726 
2727 static int
2728 mark_entry(st_data_t key, st_data_t value, st_data_t data)
2729 {
2730     struct mark_tbl_arg *arg = (void*)data;
2731     gc_mark(arg->objspace, (VALUE)value);
2732     return ST_CONTINUE;
2733 }
2734 
2735 static void
2736 mark_tbl(rb_objspace_t *objspace, st_table *tbl)
2737 {
2738     struct mark_tbl_arg arg;
2739     if (!tbl || tbl->num_entries == 0) return;
2740     arg.objspace = objspace;
2741     st_foreach(tbl, mark_entry, (st_data_t)&arg);
2742 }
2743 
2744 static int
2745 mark_key(st_data_t key, st_data_t value, st_data_t data)
2746 {
2747     struct mark_tbl_arg *arg = (void*)data;
2748     gc_mark(arg->objspace, (VALUE)key);
2749     return ST_CONTINUE;
2750 }
2751 
2752 static void
2753 mark_set(rb_objspace_t *objspace, st_table *tbl)
2754 {
2755     struct mark_tbl_arg arg;
2756     if (!tbl) return;
2757     arg.objspace = objspace;
2758     st_foreach(tbl, mark_key, (st_data_t)&arg);
2759 }
2760 
2761 void
2762 rb_mark_set(st_table *tbl)
2763 {
2764     mark_set(&rb_objspace, tbl);
2765 }
2766 
2767 static int
2768 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2769 {
2770     struct mark_tbl_arg *arg = (void*)data;
2771     gc_mark(arg->objspace, (VALUE)key);
2772     gc_mark(arg->objspace, (VALUE)value);
2773     return ST_CONTINUE;
2774 }
2775 
2776 static void
2777 mark_hash(rb_objspace_t *objspace, st_table *tbl)
2778 {
2779     struct mark_tbl_arg arg;
2780     if (!tbl) return;
2781     arg.objspace = objspace;
2782     st_foreach(tbl, mark_keyvalue, (st_data_t)&arg);
2783 }
2784 
2785 void
2786 rb_mark_hash(st_table *tbl)
2787 {
2788     mark_hash(&rb_objspace, tbl);
2789 }
2790 
2791 static void
2792 mark_method_entry(rb_objspace_t *objspace, const rb_method_entry_t *me)
2793 {
2794     const rb_method_definition_t *def = me->def;
2795 
2796     gc_mark(objspace, me->klass);
2797   again:
2798     if (!def) return;
2799     switch (def->type) {
2800       case VM_METHOD_TYPE_ISEQ:
2801         gc_mark(objspace, def->body.iseq->self);
2802         break;
2803       case VM_METHOD_TYPE_BMETHOD:
2804         gc_mark(objspace, def->body.proc);
2805         break;
2806       case VM_METHOD_TYPE_ATTRSET:
2807       case VM_METHOD_TYPE_IVAR:
2808         gc_mark(objspace, def->body.attr.location);
2809         break;
2810       case VM_METHOD_TYPE_REFINED:
2811         if (def->body.orig_me) {
2812             def = def->body.orig_me->def;
2813             goto again;
2814         }
2815         break;
2816       default:
2817         break; /* ignore */
2818     }
2819 }
2820 
2821 void
2822 rb_mark_method_entry(const rb_method_entry_t *me)
2823 {
2824     mark_method_entry(&rb_objspace, me);
2825 }
2826 
2827 static int
2828 mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
2829 {
2830     struct mark_tbl_arg *arg = (void*)data;
2831     mark_method_entry(arg->objspace, me);
2832     return ST_CONTINUE;
2833 }
2834 
2835 static void
2836 mark_m_tbl(rb_objspace_t *objspace, st_table *tbl)
2837 {
2838     struct mark_tbl_arg arg;
2839     if (!tbl) return;
2840     arg.objspace = objspace;
2841     st_foreach(tbl, mark_method_entry_i, (st_data_t)&arg);
2842 }
2843 
2844 static int
2845 mark_const_entry_i(ID key, const rb_const_entry_t *ce, st_data_t data)
2846 {
2847     struct mark_tbl_arg *arg = (void*)data;
2848     gc_mark(arg->objspace, ce->value);
2849     gc_mark(arg->objspace, ce->file);
2850     return ST_CONTINUE;
2851 }
2852 
2853 static void
2854 mark_const_tbl(rb_objspace_t *objspace, st_table *tbl)
2855 {
2856     struct mark_tbl_arg arg;
2857     if (!tbl) return;
2858     arg.objspace = objspace;
2859     st_foreach(tbl, mark_const_entry_i, (st_data_t)&arg);
2860 }
2861 
2862 #if STACK_GROW_DIRECTION < 0
2863 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2864 #elif STACK_GROW_DIRECTION > 0
2865 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2866 #else
2867 #define GET_STACK_BOUNDS(start, end, appendix) \
2868     ((STACK_END < STACK_START) ? \
2869      ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2870 #endif
2871 
2872 static void
2873 mark_current_machine_context(rb_objspace_t *objspace, rb_thread_t *th)
2874 {
2875     union {
2876         rb_jmp_buf j;
2877         VALUE v[sizeof(rb_jmp_buf) / sizeof(VALUE)];
2878     } save_regs_gc_mark;
2879     VALUE *stack_start, *stack_end;
2880 
2881     FLUSH_REGISTER_WINDOWS;
2882     /* This assumes that all registers are saved into the jmp_buf (and stack) */
2883     rb_setjmp(save_regs_gc_mark.j);
2884 
2885     GET_STACK_BOUNDS(stack_start, stack_end, 1);
2886 
2887     mark_locations_array(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v));
2888 
2889     rb_gc_mark_locations(stack_start, stack_end);
2890 #ifdef __ia64
2891     rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
2892 #endif
2893 #if defined(__mc68000__)
2894     mark_locations_array(objspace, (VALUE*)((char*)STACK_END + 2),
2895                          (STACK_START - STACK_END));
2896 #endif
2897 }
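
     /* Note: rb_setjmp() is used here as a portable way to spill
      * callee-saved registers into addressable memory; after the call,
      * any VALUE held only in a register is also visible through
      * save_regs_gc_mark.v, so scanning that union plus the C stack
      * conservatively reaches every live local reference. */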
2898 
2899 void
2900 rb_gc_mark_machine_stack(rb_thread_t *th)
2901 {
2902     rb_objspace_t *objspace = &rb_objspace;
2903     VALUE *stack_start, *stack_end;
2904 
2905     GET_STACK_BOUNDS(stack_start, stack_end, 0);
2906     rb_gc_mark_locations(stack_start, stack_end);
2907 #ifdef __ia64
2908     rb_gc_mark_locations(th->machine_register_stack_start, th->machine_register_stack_end);
2909 #endif
2910 }
2911 
2912 void
2913 rb_mark_tbl(st_table *tbl)
2914 {
2915     mark_tbl(&rb_objspace, tbl);
2916 }
2917 
2918 static void
2919 gc_mark_maybe(rb_objspace_t *objspace, VALUE obj)
2920 {
2921     (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
2922     if (is_pointer_to_heap(objspace, (void *)obj)) {
2923         int type = BUILTIN_TYPE(obj);
2924         if (type != T_ZOMBIE && type != T_NONE) {
2925             gc_mark(objspace, obj);
2926         }
2927     }
2928 }
2929 
2930 void
2931 rb_gc_mark_maybe(VALUE obj)
2932 {
2933     gc_mark_maybe(&rb_objspace, obj);
2934 }
2935 
2936 static int
2937 gc_marked(rb_objspace_t *objspace, VALUE ptr)
2938 {
2939     register bits_t *bits = GET_HEAP_MARK_BITS(ptr);
2940     if (MARKED_IN_BITMAP(bits, ptr)) return 1;
2941     return 0;
2942 }
2943 
2944 static int
2945 gc_mark_ptr(rb_objspace_t *objspace, VALUE ptr)
2946 {
2947     register bits_t *bits = GET_HEAP_MARK_BITS(ptr);
2948     if (gc_marked(objspace, ptr)) return 0;
2949     MARK_IN_BITMAP(bits, ptr);
2950     return 1;
2951 }
2952 
2953 static int
2954 markable_object_p(rb_objspace_t *objspace, VALUE obj)
2955 {
2956     if (rb_special_const_p(obj)) return 0; /* special const is not markable */
2957 
2958     if (RGENGC_CHECK_MODE) {
2959         if (!is_pointer_to_heap(objspace, (void *)obj)) rb_bug("markable_object_p: %p is not pointer to heap", (void *)obj);
2960         if (BUILTIN_TYPE(obj) == T_NONE)   rb_bug("markable_object_p: %p is T_NONE", (void *)obj);
2961         if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("markable_object_p: %p is T_ZOMBIE", (void *)obj);
2962     }
2963 
2964     return 1;
2965 }
2966 
2967 int
2968 rb_objspace_markable_object_p(VALUE obj)
2969 {
2970     return markable_object_p(&rb_objspace, obj);
2971 }
2972 
2973 static const char *
2974 type_name(int type, VALUE obj)
2975 {
2976     switch (type) {
2977 #define TYPE_NAME(t) case (t): return #t;
2978             TYPE_NAME(T_NONE);
2979             TYPE_NAME(T_OBJECT);
2980             TYPE_NAME(T_CLASS);
2981             TYPE_NAME(T_MODULE);
2982             TYPE_NAME(T_FLOAT);
2983             TYPE_NAME(T_STRING);
2984             TYPE_NAME(T_REGEXP);
2985             TYPE_NAME(T_ARRAY);
2986             TYPE_NAME(T_HASH);
2987             TYPE_NAME(T_STRUCT);
2988             TYPE_NAME(T_BIGNUM);
2989             TYPE_NAME(T_FILE);
2990             TYPE_NAME(T_MATCH);
2991             TYPE_NAME(T_COMPLEX);
2992             TYPE_NAME(T_RATIONAL);
2993             TYPE_NAME(T_NIL);
2994             TYPE_NAME(T_TRUE);
2995             TYPE_NAME(T_FALSE);
2996             TYPE_NAME(T_SYMBOL);
2997             TYPE_NAME(T_FIXNUM);
2998             TYPE_NAME(T_UNDEF);
2999             TYPE_NAME(T_NODE);
3000             TYPE_NAME(T_ICLASS);
3001             TYPE_NAME(T_ZOMBIE);
3002       case T_DATA:
3003         if (obj && rb_objspace_data_type_name(obj)) {
3004             return rb_objspace_data_type_name(obj);
3005         }
3006         return "T_DATA";
3007 #undef TYPE_NAME
3008     }
3009     return "unknown";
3010 }
3011 
3012 static const char *
3013 obj_type_name(VALUE obj)
3014 {
3015     return type_name(TYPE(obj), obj);
3016 }
3017 
3018 static void
3019 rgengc_check_shady(rb_objspace_t *objspace, VALUE obj)
3020 {
3021 #if USE_RGENGC
3022 #if RGENGC_CHECK_MODE >= 2
3023 #define SAVED_OLD(x) MARKED_IN_BITMAP(GET_HEAP_SLOT(x)->saved_oldgen_bits, (x))
3024 #define SAVED_REM(x) MARKED_IN_BITMAP(GET_HEAP_SLOT(x)->saved_rememberset_bits, (x))
3025     VALUE parent = objspace->rgengc.parent_object;
3026 
3027     if (objspace->rgengc.have_saved_bitmaps && !monitor_level) {
3028         /* check WB sanity */
3029         if (!SAVED_OLD(obj) &&                          /* obj is young object (newly created or shady) */
3030             (!FIXNUM_P(parent) && SAVED_OLD(parent)) && /* parent was old */
3031             !SAVED_REM(parent) &&                       /* parent was not remembered */
3032             !SAVED_REM(obj)) {                          /* obj was not remembered */
3033             fprintf(stderr, "rgengc_check_shady: !!! WB miss: %p (%s) -> %p (%s)\n",
3034                     (void *)parent, obj_type_name(parent),
3035                     (void *)obj, obj_type_name(obj));
3036             if (!st_lookup(monitored_object_table, (st_data_t)obj, NULL)) {
3037                 st_insert(monitored_object_table, (st_data_t)obj, 1);
3038             }
3039         }
3040     } else if (monitor_level) {
3041         st_data_t v;
3042         if (st_lookup(monitored_object_table, (st_data_t)obj, &v)) {
3043             if (v == monitor_level) {
3044                 if (FIXNUM_P(parent)) {
3045                     fprintf(stderr, "rgengc_check_shady: %14s [line %d] -> %p (%s) %d\n",
3046                             "", FIX2INT(parent), (void *)obj, obj_type_name(obj), monitor_level);
3047                 }
3048                 else {
3049                     if (st_lookup(monitored_object_table, (st_data_t)parent, &v)) {
3050                         if (parent == obj) {
3051                             /* skip self-reference information */
3052                         }
3053                         else
3054                             fprintf(stderr, "rgengc_check_shady: %14u %p (%-8s) -> %p (%-8s) %d\n", (unsigned int)v,
3055                                     (void *)parent, obj_type_name(parent), (void *)obj, obj_type_name(obj), monitor_level);
3056                     } else {
3057                         char const *marker = NULL;
3058                         if (SAVED_REM(parent)) {
3059                             if (SAVED_OLD(parent))
3060                                 marker = "REMEMBERED OLD";
3061                             else
3062                                 marker = "REMEMBERED";
3063                         } else {
3064                             if (SAVED_OLD(parent))
3065                                 marker = "!!!!!!!!!!!!! NO REMEMBERED OLD !!!!!!!!!!!!! ";
3066                             else {
3067                                 marker = "NO PROMOTED";
3068                                 st_insert(monitored_object_table, (st_data_t)parent, v+1);
3069                             }
3070                         }
3071                         fprintf(stderr, "rgengc_check_shady: %-14s %p (%-8s) -> %p (%-8s) %d\n",
3072                                 marker,
3073                                 (void *)parent, obj_type_name(parent), (void *)obj, obj_type_name(obj),monitor_level);
3074                     }
3075                 }
3076             }
3077         }
3078     }
3079 #undef SAVED_OLD
3080 #undef SAVED_REM
3081 #endif /* RGENGC_CHECK_MODE >= 2 */
3082 
3083     if (objspace->rgengc.parent_object_is_promoted && RVALUE_SHADY(obj)) {
3084         if (rgengc_remember(objspace, obj)) {
3085             objspace->rgengc.remembered_shady_object_count++;
3086         }
3087     }
3088 #endif
3089 }
3090 
3091 static void
3092 gc_mark(rb_objspace_t *objspace, VALUE ptr)
3093 {
3094     if (!markable_object_p(objspace, ptr)) return;
3095 
3096     if (LIKELY(objspace->mark_func_data == 0)) {
3097         rgengc_check_shady(objspace, ptr);
3098         if (!gc_mark_ptr(objspace, ptr)) return; /* already marked */
3099         push_mark_stack(&objspace->mark_stack, ptr);
3100     }
3101     else {
3102         objspace->mark_func_data->mark_func(ptr, objspace->mark_func_data->data);
3103     }
3104 }
3105 
3106 void
3107 rb_gc_mark(VALUE ptr)
3108 {
3109     gc_mark(&rb_objspace, ptr);
3110 }
3111 
3112 static void
3113 gc_mark_children(rb_objspace_t *objspace, VALUE ptr)
3114 {
3115     register RVALUE *obj = RANY(ptr);
3116 
3117 #if RGENGC_CHECK_MODE >= 2
3118     objspace->rgengc.parent_object = (VALUE)ptr;
3119 #endif
3120 
3121     goto marking;               /* skip checks already done by the caller */
3122 
3123   again:
3124     if (LIKELY(objspace->mark_func_data == 0)) {
3125         obj = RANY(ptr);
3126         if (!markable_object_p(objspace, ptr)) return;
3127         rgengc_check_shady(objspace, ptr);
3128         if (!gc_mark_ptr(objspace, ptr)) return;  /* already marked */
3129 #if RGENGC_CHECK_MODE >= 2
3130         objspace->rgengc.parent_object = (VALUE)ptr;
3131 #endif
3132     }
3133     else {
3134         gc_mark(objspace, ptr);
3135         return;
3136     }
3137 
3138 #if USE_RGENGC
3139     if (RGENGC_CHECK_MODE && RVALUE_SHADY(obj) && RVALUE_PROMOTED(obj)) {
3140         rb_bug("gc_mark_children: (0) %p (%s) is shady and promoted.\n", (void *)obj, obj_type_name((VALUE)obj));
3141     }
3142 #endif /* USE_RGENGC */
3143 
3144   marking:
3145 
3146 #if USE_RGENGC
3147     if (LIKELY(objspace->mark_func_data == 0)) {
3148         if (RGENGC_CHECK_MODE && RVALUE_SHADY(obj) && RVALUE_PROMOTED(obj)) {
3149             rb_bug("gc_mark_children: (1) %p (%s) is shady and promoted.\n", (void *)obj, obj_type_name((VALUE)obj));
3150         }
3151 
3152         /* minor/major common */
3153         if (!RVALUE_SHADY(obj)) {
3154             objspace->rgengc.parent_object_is_promoted = TRUE;
3155 
3156             if (!RVALUE_PROMOTED(obj)) {
3157                 RVALUE_PROMOTE((VALUE)obj); /* non-shady object can be promoted to OLDGEN object */
3158                 rgengc_report(3, objspace, "gc_mark_children: promote %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
3159 
3160                 objspace->rgengc.oldgen_object_count++;
3161             }
3162             else if (!objspace->rgengc.during_minor_gc) { /* major/full GC */
3163                 objspace->rgengc.oldgen_object_count++;
3164             }
3165         }
3166         else {
3167             rgengc_report(3, objspace, "gc_mark_children: do not promote shady %p (%s).\n", (void *)obj, obj_type_name((VALUE)obj));
3168             objspace->rgengc.parent_object_is_promoted = FALSE;
3169         }
3170 
3171         if (RGENGC_CHECK_MODE && RVALUE_SHADY(obj) && RVALUE_PROMOTED(obj)) {
3172             rb_bug("gc_mark_children: (2) %p (%s) is shady and promoted.\n", (void *)obj, obj_type_name((VALUE)obj));
3173         }
3174     }
3175 #endif /* USE_RGENGC */
3176 
3177     if (FL_TEST(obj, FL_EXIVAR)) {
3178         rb_mark_generic_ivar(ptr);
3179     }
3180 
3181     switch (BUILTIN_TYPE(obj)) {
3182       case T_NIL:
3183       case T_FIXNUM:
3184         rb_bug("rb_gc_mark() called for broken object");
3185         break;
3186 
3187       case T_NODE:
3188         switch (nd_type(obj)) {
3189           case NODE_IF:         /* 1,2,3 */
3190           case NODE_FOR:
3191           case NODE_ITER:
3192           case NODE_WHEN:
3193           case NODE_MASGN:
3194           case NODE_RESCUE:
3195           case NODE_RESBODY:
3196           case NODE_CLASS:
3197           case NODE_BLOCK_PASS:
3198             gc_mark(objspace, (VALUE)obj->as.node.u2.node);
3199             /* fall through */
3200           case NODE_BLOCK:      /* 1,3 */
3201           case NODE_ARRAY:
3202           case NODE_DSTR:
3203           case NODE_DXSTR:
3204           case NODE_DREGX:
3205           case NODE_DREGX_ONCE:
3206           case NODE_ENSURE:
3207           case NODE_CALL:
3208           case NODE_DEFS:
3209           case NODE_OP_ASGN1:
3210             gc_mark(objspace, (VALUE)obj->as.node.u1.node);
3211             /* fall through */
3212           case NODE_SUPER:      /* 3 */
3213           case NODE_FCALL:
3214           case NODE_DEFN:
3215           case NODE_ARGS_AUX:
3216             ptr = (VALUE)obj->as.node.u3.node;
3217             goto again;
3218 
3219           case NODE_WHILE:      /* 1,2 */
3220           case NODE_UNTIL:
3221           case NODE_AND:
3222           case NODE_OR:
3223           case NODE_CASE:
3224           case NODE_SCLASS:
3225           case NODE_DOT2:
3226           case NODE_DOT3:
3227           case NODE_FLIP2:
3228           case NODE_FLIP3:
3229           case NODE_MATCH2:
3230           case NODE_MATCH3:
3231           case NODE_OP_ASGN_OR:
3232           case NODE_OP_ASGN_AND:
3233           case NODE_MODULE:
3234           case NODE_ALIAS:
3235           case NODE_VALIAS:
3236           case NODE_ARGSCAT:
3237             gc_mark(objspace, (VALUE)obj->as.node.u1.node);
3238             /* fall through */
3239           case NODE_GASGN:      /* 2 */
3240           case NODE_LASGN:
3241           case NODE_DASGN:
3242           case NODE_DASGN_CURR:
3243           case NODE_IASGN:
3244           case NODE_IASGN2:
3245           case NODE_CVASGN:
3246           case NODE_COLON3:
3247           case NODE_OPT_N:
3248           case NODE_EVSTR:
3249           case NODE_UNDEF:
3250           case NODE_POSTEXE:
3251             ptr = (VALUE)obj->as.node.u2.node;
3252             goto again;
3253 
3254           case NODE_HASH:       /* 1 */
3255           case NODE_LIT:
3256           case NODE_STR:
3257           case NODE_XSTR:
3258           case NODE_DEFINED:
3259           case NODE_MATCH:
3260           case NODE_RETURN:
3261           case NODE_BREAK:
3262           case NODE_NEXT:
3263           case NODE_YIELD:
3264           case NODE_COLON2:
3265           case NODE_SPLAT:
3266           case NODE_TO_ARY:
3267             ptr = (VALUE)obj->as.node.u1.node;
3268             goto again;
3269 
3270           case NODE_SCOPE:      /* 2,3 */
3271           case NODE_CDECL:
3272           case NODE_OPT_ARG:
3273             gc_mark(objspace, (VALUE)obj->as.node.u3.node);
3274             ptr = (VALUE)obj->as.node.u2.node;
3275             goto again;
3276 
3277           case NODE_ARGS:       /* custom */
3278             {
3279                 struct rb_args_info *args = obj->as.node.u3.args;
3280                 if (args) {
3281                     if (args->pre_init)    gc_mark(objspace, (VALUE)args->pre_init);
3282                     if (args->post_init)   gc_mark(objspace, (VALUE)args->post_init);
3283                     if (args->opt_args)    gc_mark(objspace, (VALUE)args->opt_args);
3284                     if (args->kw_args)     gc_mark(objspace, (VALUE)args->kw_args);
3285                     if (args->kw_rest_arg) gc_mark(objspace, (VALUE)args->kw_rest_arg);
3286                 }
3287             }
3288             ptr = (VALUE)obj->as.node.u2.node;
3289             goto again;
3290 
3291           case NODE_ZARRAY:     /* - */
3292           case NODE_ZSUPER:
3293           case NODE_VCALL:
3294           case NODE_GVAR:
3295           case NODE_LVAR:
3296           case NODE_DVAR:
3297           case NODE_IVAR:
3298           case NODE_CVAR:
3299           case NODE_NTH_REF:
3300           case NODE_BACK_REF:
3301           case NODE_REDO:
3302           case NODE_RETRY:
3303           case NODE_SELF:
3304           case NODE_NIL:
3305           case NODE_TRUE:
3306           case NODE_FALSE:
3307           case NODE_ERRINFO:
3308           case NODE_BLOCK_ARG:
3309             break;
3310           case NODE_ALLOCA:
3311             mark_locations_array(objspace,
3312                                  (VALUE*)obj->as.node.u1.value,
3313                                  obj->as.node.u3.cnt);
3314             gc_mark(objspace, (VALUE)obj->as.node.u2.node);
3315             break;
3316 
3317           case NODE_CREF:
3318             gc_mark(objspace, obj->as.node.nd_refinements);
3319             gc_mark(objspace, (VALUE)obj->as.node.u1.node);
3320             ptr = (VALUE)obj->as.node.u3.node;
3321             goto again;
3322 
3323           default:              /* unlisted NODE */
3324             gc_mark_maybe(objspace, (VALUE)obj->as.node.u1.node);
3325             gc_mark_maybe(objspace, (VALUE)obj->as.node.u2.node);
3326             gc_mark_maybe(objspace, (VALUE)obj->as.node.u3.node);
3327         }
3328         return;                 /* no need to mark class. */
3329     }
3330 
3331     gc_mark(objspace, obj->as.basic.klass);
3332     switch (BUILTIN_TYPE(obj)) {
3333       case T_ICLASS:
3334       case T_CLASS:
3335       case T_MODULE:
3336         mark_m_tbl(objspace, RCLASS_M_TBL(obj));
3337         if (!RCLASS_EXT(obj)) break;
3338         mark_tbl(objspace, RCLASS_IV_TBL(obj));
3339         mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3340         ptr = RCLASS_SUPER((VALUE)obj);
3341         goto again;
3342 
3343       case T_ARRAY:
3344         if (FL_TEST(obj, ELTS_SHARED)) {
3345             ptr = obj->as.array.as.heap.aux.shared;
3346             goto again;
3347         }
3348         else {
3349             long i, len = RARRAY_LEN(obj);
3350             const VALUE *ptr = RARRAY_RAWPTR(obj);
3351             for (i=0; i < len; i++) {
3352                 gc_mark(objspace, *ptr++);
3353             }
3354         }
3355         break;
3356 
3357       case T_HASH:
3358         mark_hash(objspace, obj->as.hash.ntbl);
3359         ptr = obj->as.hash.ifnone;
3360         goto again;
3361 
3362       case T_STRING:
3363 #define STR_ASSOC FL_USER3   /* copied from string.c */
3364         if (FL_TEST(obj, RSTRING_NOEMBED) && FL_ANY(obj, ELTS_SHARED|STR_ASSOC)) {
3365             ptr = obj->as.string.as.heap.aux.shared;
3366             goto again;
3367         }
3368         break;
3369 
3370       case T_DATA:
3371         if (RTYPEDDATA_P(obj)) {
3372             RUBY_DATA_FUNC mark_func = obj->as.typeddata.type->function.dmark;
3373             if (mark_func) (*mark_func)(DATA_PTR(obj));
3374         }
3375         else {
3376             if (obj->as.data.dmark) (*obj->as.data.dmark)(DATA_PTR(obj));
3377         }
3378         break;
3379 
3380       case T_OBJECT:
3381         {
3382             long i, len = ROBJECT_NUMIV(obj);
3383             VALUE *ptr = ROBJECT_IVPTR(obj);
3384             for (i  = 0; i < len; i++) {
3385                 gc_mark(objspace, *ptr++);
3386             }
3387         }
3388         break;
3389 
3390       case T_FILE:
3391         if (obj->as.file.fptr) {
3392             gc_mark(objspace, obj->as.file.fptr->pathv);
3393             gc_mark(objspace, obj->as.file.fptr->tied_io_for_writing);
3394             gc_mark(objspace, obj->as.file.fptr->writeconv_asciicompat);
3395             gc_mark(objspace, obj->as.file.fptr->writeconv_pre_ecopts);
3396             gc_mark(objspace, obj->as.file.fptr->encs.ecopts);
3397             gc_mark(objspace, obj->as.file.fptr->write_lock);
3398         }
3399         break;
3400 
3401       case T_REGEXP:
3402         ptr = obj->as.regexp.src;
3403         goto again;
3404 
3405       case T_FLOAT:
3406       case T_BIGNUM:
3407         break;
3408 
3409       case T_MATCH:
3410         gc_mark(objspace, obj->as.match.regexp);
3411         if (obj->as.match.str) {
3412             ptr = obj->as.match.str;
3413             goto again;
3414         }
3415         break;
3416 
3417       case T_RATIONAL:
3418         gc_mark(objspace, obj->as.rational.num);
3419         ptr = obj->as.rational.den;
3420         goto again;
3421 
3422       case T_COMPLEX:
3423         gc_mark(objspace, obj->as.complex.real);
3424         ptr = obj->as.complex.imag;
3425         goto again;
3426 
3427       case T_STRUCT:
3428         {
3429             long len = RSTRUCT_LEN(obj);
3430             const VALUE *ptr = RSTRUCT_RAWPTR(obj);
3431 
3432             while (len--) {
3433                 gc_mark(objspace, *ptr++);
3434             }
3435         }
3436         break;
3437 
3438       default:
3439 #ifdef GC_DEBUG
3440         rb_gcdebug_print_obj_condition((VALUE)obj);
3441 #endif
3442         if (BUILTIN_TYPE(obj) == T_NONE)   rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3443         if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3444         rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3445                BUILTIN_TYPE(obj), (void *)obj,
3446                is_pointer_to_heap(objspace, obj) ? "corrupted object" : "non object");
3447     }
3448 }
3449 
3450 static void
3451 gc_mark_stacked_objects(rb_objspace_t *objspace)
3452 {
3453     mark_stack_t *mstack = &objspace->mark_stack;
3454     VALUE obj = 0;
3455 
3456     if (!mstack->index) return;
3457     while (pop_mark_stack(mstack, &obj)) {
3458         gc_mark_children(objspace, obj);
3459     }
3460     shrink_stack_chunk_cache(mstack);
3461 }
3462 
3463 static void
3464 gc_marks_body(rb_objspace_t *objspace, int minor_gc)
3465 {
3466     struct gc_list *list;
3467     rb_thread_t *th = GET_THREAD();
3468 
3469     /* start marking */
3470     rgengc_report(1, objspace, "gc_marks_body: start (%s)\n", minor_gc ? "minor" : "major");
3471 
3472 #if USE_RGENGC
3473     objspace->rgengc.parent_object_is_promoted = FALSE;
3474     objspace->rgengc.parent_object = Qundef;
3475     objspace->rgengc.during_minor_gc = minor_gc;
3476 
3477     if (objspace->rgengc.during_minor_gc) {
3478         objspace->profile.minor_gc_count++;
3479         rgengc_rememberset_mark(objspace);
3480     }
3481     else {
3482         objspace->profile.major_gc_count++;
3483         rgengc_mark_and_rememberset_clear(objspace);
3484     }
3485 #endif
3486 
3487 #if RGENGC_CHECK_MODE > 1
3488 #define MARK_CHECKPOINT do {objspace->rgengc.parent_object = INT2FIX(__LINE__);} while (0)
3489 #else
3490 #define MARK_CHECKPOINT
3491 #endif
3492 
3493     MARK_CHECKPOINT;
3494     SET_STACK_END;
3495     th->vm->self ? rb_gc_mark(th->vm->self) : rb_vm_mark(th->vm);
3496 
3497     MARK_CHECKPOINT;
3498     mark_tbl(objspace, finalizer_table);
3499 
3500     MARK_CHECKPOINT;
3501     mark_current_machine_context(objspace, th);
3502 
3503     MARK_CHECKPOINT;
3504     rb_gc_mark_symbols();
3505 
3506     MARK_CHECKPOINT;
3507     rb_gc_mark_encodings();
3508 
3509     /* mark protected global variables */
3510     MARK_CHECKPOINT;
3511     for (list = global_List; list; list = list->next) {
3512         rb_gc_mark_maybe(*list->varptr);
3513     }
3514 
3515     MARK_CHECKPOINT;
3516     rb_mark_end_proc();
3517 
3518     MARK_CHECKPOINT;
3519     rb_gc_mark_global_tbl();
3520 
3521     MARK_CHECKPOINT;
3522     mark_tbl(objspace, rb_class_tbl);
3523 
3524     /* mark generic instance variables for special constants */
3525     MARK_CHECKPOINT;
3526     rb_mark_generic_ivar_tbl();
3527 
3528     MARK_CHECKPOINT;
3529     rb_gc_mark_parser();
3530 
3531     MARK_CHECKPOINT;
3532     rb_gc_mark_unlinked_live_method_entries(th->vm);
3533 
3534     /* marking-loop */
3535     gc_mark_stacked_objects(objspace);
3536 
3537 #undef MARK_CHECKPOINT
3538 
3539     /* cleanup */
3540     rgengc_report(1, objspace, "gc_marks_body: end (%s)\n", minor_gc ? "minor" : "major");
3541 }
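/* [Editor's note] Summary of the pass above: a minor GC first re-marks
 * objects recorded in the remember set (old objects that may point to
 * young ones), while a major GC instead clears the mark and remember-set
 * bitmaps and starts from scratch.  Both then mark the VM roots (the VM
 * itself, finalizer table, machine context, symbols, encodings, global
 * variables, end procs, class table, generic ivars, parser, unlinked
 * method entries) between MARK_CHECKPOINTs, and finally drain the mark
 * stack in gc_mark_stacked_objects().
 */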
3542 
3543 #if RGENGC_CHECK_MODE >= 2
3544 
3545 static void
3546 gc_oldgen_bitmap2flag(struct heaps_slot *slot)
3547 {
3548     bits_t *oldgen_bits = &slot->oldgen_bits[0];
3549     RVALUE *p = slot->header->start;
3550     RVALUE *pend = p + slot->header->limit;
3551 
3552     while (p < pend) {
3553         if (MARKED_IN_BITMAP(oldgen_bits, p)) FL_SET2(p, FL_OLDGEN);
3554         else                                  FL_UNSET2(p, FL_OLDGEN);
3555         p++;
3556     }
3557 }
3558 
3559 static bits_t *
3560 gc_export_bitmaps(rb_objspace_t *objspace)
3561 {
3562     bits_t *exported_bitmaps = (bits_t *)malloc(HEAP_BITMAP_SIZE * heaps_used * 3);
3563     size_t i;
3564 
3565     if (exported_bitmaps == 0) rb_bug("gc_export_bitmaps: not enough memory to test.\n");
3566 
3567     for (i=0; i<heaps_used; i++) {
3568         struct heaps_slot *slot = objspace->heap.sorted[i]->base;
3569 
3570         memcpy(&exported_bitmaps[(3*i+0)*HEAP_BITMAP_LIMIT], &slot->mark_bits[0],        HEAP_BITMAP_SIZE);
3571         memcpy(&exported_bitmaps[(3*i+1)*HEAP_BITMAP_LIMIT], &slot->rememberset_bits[0], HEAP_BITMAP_SIZE);
3572         memcpy(&exported_bitmaps[(3*i+2)*HEAP_BITMAP_LIMIT], &slot->oldgen_bits[0],      HEAP_BITMAP_SIZE);
3573     }
3574 
3575     return exported_bitmaps;
3576 }
3577 
3578 static void
3579 gc_restore_exported_bitmaps(rb_objspace_t *objspace, bits_t *exported_bitmaps)
3580 {
3581     size_t i;
3582 
3583     for (i=0; i<heaps_used; i++) {
3584         struct heaps_slot *slot = objspace->heap.sorted[i]->base;
3585 
3586         /* restore bitmaps */
3587         memcpy(&slot->mark_bits[0],        &exported_bitmaps[(3*i+0)*HEAP_BITMAP_LIMIT], HEAP_BITMAP_SIZE);
3588         memcpy(&slot->rememberset_bits[0], &exported_bitmaps[(3*i+1)*HEAP_BITMAP_LIMIT], HEAP_BITMAP_SIZE);
3589         memcpy(&slot->oldgen_bits[0],      &exported_bitmaps[(3*i+2)*HEAP_BITMAP_LIMIT], HEAP_BITMAP_SIZE);
3590 
3591         /* restore oldgen flags */
3592         gc_oldgen_bitmap2flag(slot);
3593     }
3594 }
3595 
3596 static void
3597 gc_free_exported_bitmaps(rb_objspace_t *objspace, bits_t *exported_bitmaps)
3598 {
3599     free(exported_bitmaps);
3600 }
3601 
3602 static void
3603 gc_save_bitmaps(rb_objspace_t *objspace)
3604 {
3605     size_t i;
3606 
3607     for (i=0; i<heaps_used; i++) {
3608         struct heaps_slot *slot = objspace->heap.sorted[i]->base;
3609 
3610         /* save bitmaps */
3611         memcpy(&slot->saved_mark_bits[0],        &slot->mark_bits[0],        HEAP_BITMAP_SIZE);
3612         memcpy(&slot->saved_rememberset_bits[0], &slot->rememberset_bits[0], HEAP_BITMAP_SIZE);
3613         memcpy(&slot->saved_oldgen_bits[0],      &slot->oldgen_bits[0],      HEAP_BITMAP_SIZE);
3614     }
3615 
3616     objspace->rgengc.have_saved_bitmaps = TRUE;
3617 }
3618 
3619 static void
3620 gc_load_bitmaps(rb_objspace_t *objspace)
3621 {
3622     size_t i;
3623 
3624     for (i=0; i<heaps_used; i++) {
3625         struct heaps_slot *slot = objspace->heap.sorted[i]->base;
3626 
3627         /* load bitmaps */
3628         memcpy(&slot->mark_bits[0],        &slot->saved_mark_bits[0],        HEAP_BITMAP_SIZE);
3629         memcpy(&slot->rememberset_bits[0], &slot->saved_rememberset_bits[0], HEAP_BITMAP_SIZE);
3630         memcpy(&slot->oldgen_bits[0],      &slot->saved_oldgen_bits[0],      HEAP_BITMAP_SIZE);
3631 
3632         gc_oldgen_bitmap2flag(slot);
3633     }
3634 }
3635 
3636 static void
3637 gc_marks_test(rb_objspace_t *objspace)
3638 {
3639     bits_t *exported_bitmaps;
3640     size_t i;
3641     size_t stored_oldgen, stored_shady;
3642     /*
3643      * Now we have two kinds of bitmaps:
3644      *   saved_bitmap:    taken before minor marking
3645      *   exported_bitmap: taken after minor marking
3646      */
3647 
3648 
3649     if (!monitored_object_table)
3650         monitored_object_table = st_init_numtable();
3651     gc_save_bitmaps(objspace);
3652 
3653     rgengc_report(1, objspace, "gc_marks_test: minor gc\n");
3654     {
3655         gc_marks_body(objspace, TRUE);
3656     }
3657     exported_bitmaps = gc_export_bitmaps(objspace);
3658 
3659     rgengc_report(1, objspace, "gc_marks_test: test-full-gc\n");
3660 
3661     /* run major (full) gc with temporary mark/rememberset */
3662     stored_oldgen = objspace->rgengc.oldgen_object_count;
3663     stored_shady = objspace->rgengc.remembered_shady_object_count;
3664     {
3665         gc_marks_body(objspace, FALSE);
3666     }
3667     objspace->rgengc.during_minor_gc = TRUE;
3668     objspace->rgengc.oldgen_object_count = stored_oldgen;
3669     objspace->rgengc.remembered_shady_object_count = stored_shady;
3670 
3671     /* check */
3672     for (i=0; i<heaps_used; i++) {
3673         bits_t *minor_mark_bits = &exported_bitmaps[(3*i+0)*HEAP_BITMAP_LIMIT];
3674         bits_t *major_mark_bits = objspace->heap.sorted[i]->base->mark_bits;
3675         RVALUE *p = objspace->heap.sorted[i]->start;
3676         RVALUE *pend = p + objspace->heap.sorted[i]->limit;
3677 
3678         while (p < pend) {
3679             if (MARKED_IN_BITMAP(major_mark_bits, p) &&  /* should be alive */
3680                 !MARKED_IN_BITMAP(minor_mark_bits, p)) { /* not marked -> BUG! */
3681                 fprintf(stderr, "gc_marks_test: %p (%s) is living, but not marked && not promoted.\n", p, obj_type_name((VALUE)p));
3682                 st_insert(monitored_object_table, (st_data_t)p, 1);
3683             }
3684             p++;
3685         }
3686     }
3687 
3688     if (monitored_object_table->num_entries) {
3689         if (RGENGC_CHECK_MODE >= 3) {
3690             st_index_t old_num;
3691             do {
3692                 old_num = monitored_object_table->num_entries;
3693                 monitor_level ++;
3694                 fprintf(stderr, "!!!! restart major gc to get more information !!!!\n");
3695                 gc_load_bitmaps(objspace);
3696                 gc_marks_body(objspace, FALSE);
3697             } while (old_num != monitored_object_table->num_entries);
3698         }
3699         rb_bug("WriteBarrier Error\n");
3700     }
3701     else {
3702         gc_restore_exported_bitmaps(objspace, exported_bitmaps);
3703         gc_free_exported_bitmaps(objspace, exported_bitmaps);
3704         objspace->rgengc.have_saved_bitmaps = FALSE;
3705     }
3706 }
3707 #endif /* RGENGC_CHECK_MODE >= 2 */
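/* [Editor's note] gc_marks_test() above validates write-barrier
 * correctness: it saves the bitmaps, runs a minor GC, exports the
 * resulting mark bits, then runs a full GC over the same heap.  Any
 * object the full GC proves live that the minor GC failed to mark (and
 * that was not promoted) indicates a missed write barrier and is
 * reported via rb_bug("WriteBarrier Error").
 */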
3708 
3709 static void
3710 gc_marks(rb_objspace_t *objspace, int minor_gc)
3711 {
3712     struct mark_func_data_struct *prev_mark_func_data;
3713 
3714     gc_prof_mark_timer_start(objspace);
3715     {
3716         /* setup marking */
3717         prev_mark_func_data = objspace->mark_func_data;
3718         objspace->mark_func_data = 0;
3719 
3720 #if USE_RGENGC
3721         if (minor_gc == FALSE) { /* major/full GC */
3722             objspace->rgengc.remembered_shady_object_count = 0;
3723             objspace->rgengc.oldgen_object_count = 0;
3724 
3725             gc_marks_body(objspace, FALSE);
3726 
3727             /* Force the next major GC once the old / remembered-shady object counts exceed twice their values at the end of this full GC */
3728             objspace->rgengc.remembered_shady_object_limit = objspace->rgengc.remembered_shady_object_count * 2;
3729             objspace->rgengc.oldgen_object_limit = objspace->rgengc.oldgen_object_count * 2;
3730         }
3731         else { /* minor GC */
3732 #if RGENGC_CHECK_MODE >= 2
3733             gc_marks_test(objspace);
3734 #else
3735             gc_marks_body(objspace, TRUE);
3736 #endif
3737         }
3738 
3739 #if RGENGC_PROFILE > 0
3740     if (gc_prof_record(objspace)) {
3741         gc_profile_record *record = gc_prof_record(objspace);
3742         record->oldgen_objects = objspace->rgengc.oldgen_object_count;
3743     }
3744 #endif
3745 
3746 #else /* USE_RGENGC */
3747         gc_marks_body(objspace, FALSE);
3748 #endif
3749 
3750         objspace->mark_func_data = prev_mark_func_data;
3751     }
3752     gc_prof_mark_timer_stop(objspace);
3753 }
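/* [Editor's note] The "* 2" above implements a simple doubling
 * heuristic: after a full GC observes N old objects, no major GC is
 * forced until the old-object count exceeds 2N (likewise for remembered
 * shady objects); see the GPR_FLAG_MAJOR_BY_OLDGEN / _BY_SHADY checks
 * in garbage_collect_body().
 */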
3754 
3755 /* RGENGC */
3756 
3757 #if USE_RGENGC
3758 
3759 /* bit operations */
3760 
3761 static int
3762 rgengc_remembersetbits_get(rb_objspace_t *objspace, VALUE obj)
3763 {
3764     bits_t *bits = GET_HEAP_REMEMBERSET_BITS(obj);
3765     return MARKED_IN_BITMAP(bits, obj) ? 1 : 0;
3766 }
3767 
3768 static int
3769 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
3770 {
3771     bits_t *bits = GET_HEAP_REMEMBERSET_BITS(obj);
3772     if (MARKED_IN_BITMAP(bits, obj)) {
3773         return FALSE;
3774     }
3775     else {
3776         MARK_IN_BITMAP(bits, obj);
3777         return TRUE;
3778     }
3779 }
3780 
3781 /* wb, etc */
3782 
3783 /* return FALSE if already remembered */
3784 static int
3785 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
3786 {
3787     rgengc_report(2, objspace, "rgengc_remember: %p (%s, %s) %s\n", (void *)obj, obj_type_name(obj),
3788                   RVALUE_SHADY(obj) ? "shady" : "non-shady",
3789                   rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
3790 
3791 #if RGENGC_CHECK_MODE > 0
3792     {
3793         switch (BUILTIN_TYPE(obj)) {
3794           case T_NONE:
3795           case T_ZOMBIE:
3796             rb_bug("rgengc_remember: should not remember %p (%s)\n",
3797                    (void *)obj, obj_type_name(obj));
3798           default:
3799             ;
3800         }
3801     }
3802 #endif
3803 
3804     if (RGENGC_PROFILE) {
3805         if (!rgengc_remembered(objspace, obj)) {
3806             if (!RVALUE_SHADY(obj)) {
3807                 objspace->profile.remembered_normal_object_count++;
3808 #if RGENGC_PROFILE >= 2
3809                 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
3810 #endif
3811             }
3812             else {
3813                 objspace->profile.remembered_shady_object_count++;
3814 #if RGENGC_PROFILE >= 2
3815                 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
3816 #endif
3817             }
3818         }
3819     }
3820 
3821     return rgengc_remembersetbits_set(objspace, obj);
3822 }
3823 
3824 static int
3825 rgengc_remembered(rb_objspace_t *objspace, VALUE obj)
3826 {
3827     int result = rgengc_remembersetbits_get(objspace, obj);
3828     check_bitmap_consistency(obj);
3829     rgengc_report(6, objspace, "gc_remembered: %p (%s) => %d\n", (void *)obj, obj_type_name(obj), result);
3830     return result;
3831 }
3832 
3833 static void
3834 rgengc_rememberset_mark(rb_objspace_t *objspace)
3835 {
3836     size_t i, j;
3837     RVALUE *p, *offset;
3838     bits_t *bits, bitset;
3839 
3840 #if RGENGC_PROFILE > 0
3841     size_t shady_object_count = 0, clear_count = 0;
3842 #endif
3843 
3844     for (i=0; i<heaps_used; i++) {
3845         p = objspace->heap.sorted[i]->start;
3846         bits = GET_HEAP_REMEMBERSET_BITS(p);
3847 
3848         offset = p - NUM_IN_SLOT(p);
3849 
3850         for (j=0; j < HEAP_BITMAP_LIMIT; j++) {
3851             if (bits[j]) {
3852                 p = offset + j * BITS_BITLENGTH;
3853                 bitset = bits[j];
3854                 do {
3855                     if (bitset & 1) {
3856                         rgengc_report(2, objspace, "rgengc_rememberset_mark: mark %p (%s)\n", p, obj_type_name((VALUE)p));
3857 
3858                         gc_mark_ptr(objspace, (VALUE)p);
3859                         gc_mark_children(objspace, (VALUE) p);
3860 
3861                         if (!RVALUE_SHADY(p)) {
3862                             rgengc_report(2, objspace, "rgengc_rememberset_mark: clear %p (%s)\n", p, obj_type_name((VALUE)p));
3863                             CLEAR_IN_BITMAP(bits, p);
3864 #if RGENGC_PROFILE > 0
3865                             clear_count++;
3866 #endif
3867                         }
3868                         else {
3869 #if RGENGC_PROFILE > 0
3870                             shady_object_count++;
3871 #endif
3872                         }
3873                     }
3874                     p++;
3875                     bitset >>= 1;
3876                 } while (bitset);
3877             }
3878         }
3879     }
3880 
3881     rgengc_report(2, objspace, "rgengc_rememberset_mark: finished\n");
3882 
3883 #if RGENGC_PROFILE > 0
3884     rgengc_report(2, objspace, "rgengc_rememberset_mark: clear_count: %"PRIdSIZE", shady_object_count: %"PRIdSIZE"\n", clear_count, shady_object_count);
3885     if (gc_prof_record(objspace)) {
3886         gc_profile_record *record = gc_prof_record(objspace);
3887         record->remembered_normal_objects = clear_count;
3888         record->remembered_shady_objects = shady_object_count;
3889     }
3890 #endif
3891 }
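/* [Editor's note] The inner do/while above scans one bitmap word at a
 * time, visiting only slots whose remember-set bit is set.  The same
 * idiom in isolation (a sketch; visit() is hypothetical):
 *
 *     bits_t word = bits[j];
 *     RVALUE *p = offset + j * BITS_BITLENGTH;  // first slot of word j
 *     while (word) {
 *         if (word & 1) visit(p);   // bit set => remembered slot
 *         p++;
 *         word >>= 1;               // loop ends once high bits run out
 *     }
 *
 * Skipping all-zero words (the if (bits[j]) test) makes sparse remember
 * sets cheap to traverse.
 */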
3892 
3893 static void
3894 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace)
3895 {
3896     size_t i;
3897 
3898     for (i=0; i<heaps_used; i++) {
3899         struct heaps_slot *slot = objspace->heap.sorted[i]->base;
3900         memset(&slot->mark_bits[0],        0, HEAP_BITMAP_SIZE);
3901         memset(&slot->rememberset_bits[0], 0, HEAP_BITMAP_SIZE);
3902     }
3903 }
3904 
3905 /* RGENGC: APIs */
3906 
3907 void
3908 rb_gc_writebarrier(VALUE a, VALUE b)
3909 {
3910     rb_objspace_t *objspace = &rb_objspace;
3911     int type;
3912 
3913     if (RGENGC_CHECK_MODE) {
3914         if (!RVALUE_PROMOTED(a)) rb_bug("rb_gc_wb: referrer object %p (%s) is not promoted.\n", (void *)a, obj_type_name(a));
3915         if (RVALUE_PROMOTED(b)) rb_bug("rb_gc_wb: referred object %p (%s) is promoted.\n", (void *)b, obj_type_name(b));
3916     }
3917 
3918     if (!rgengc_remembered(objspace, a)) {
3919         type = BUILTIN_TYPE(a);
3920         /* TODO: 2 << 16 is just a magic number. */
3921         if ((type == T_ARRAY && RARRAY_LEN(a) >= 2 << 16) ||
3922             (type == T_HASH  && RHASH_SIZE(a) >= 2 << 16)) {
3923             if (!rgengc_remembered(objspace, b)) {
3924                 rgengc_report(2, objspace, "rb_gc_wb: %p (%s) -> %p (%s)\n",
3925                               (void *)a, obj_type_name(a), (void *)b, obj_type_name(b));
3926                 rgengc_remember(objspace, b);
3927             }
3928         }
3929         else {
3930             rgengc_report(2, objspace, "rb_gc_wb: %p (%s) -> %p (%s)\n",
3931                           (void *)a, obj_type_name(a), (void *)b, obj_type_name(b));
3932             rgengc_remember(objspace, a);
3933         }
3934     }
3935 }
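/* [Editor's note] This is the generational write barrier: storing a
 * reference from promoted (old) object a to unpromoted object b must
 * record the edge, or a minor GC would never find b.  Normally a itself
 * is remembered and re-scanned on the next minor GC; for very large
 * arrays and hashes (the 2 << 16 threshold above) re-scanning a would
 * be expensive, so the referenced object b is remembered directly
 * instead.
 */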
3936 
3937 void
3938 rb_gc_writebarrier_unprotect_promoted(VALUE obj)
3939 {
3940     rb_objspace_t *objspace = &rb_objspace;
3941 
3942     if (RGENGC_CHECK_MODE) {
3943         if (!RVALUE_PROMOTED(obj)) rb_bug("rb_gc_writebarrier_unprotect_promoted: called on non-promoted object");
3944         if (!RVALUE_SHADY(obj)) rb_bug("rb_gc_writebarrier_unprotect_promoted: called on non-shady object");
3945     }
3946 
3947     rgengc_report(2, objspace, "rb_gc_writebarrier_unprotect_promoted: %p (%s)%s\n", (void *)obj, obj_type_name(obj),
3948                   rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
3949 
3950     RVALUE_DEMOTE(obj);
3951 
3952     rgengc_remember(objspace, obj);
3953     objspace->rgengc.remembered_shady_object_count++;
3954 
3955 #if RGENGC_PROFILE
3956     objspace->profile.shade_operation_count++;
3957 #if RGENGC_PROFILE >= 2
3958     objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
3959 #endif /* RGENGC_PROFILE >= 2 */
3960 #endif
3961 }
3962 
3963 #endif /* USE_RGENGC */
3964 
3965 /* RGENGC analysis information */
3966 
3967 VALUE
3968 rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
3969 {
3970     return OBJ_WB_PROTECTED(obj) ? Qtrue : Qfalse;
3971 }
3972 
3973 VALUE
3974 rb_obj_rgengc_promoted_p(VALUE obj)
3975 {
3976     return OBJ_PROMOTED(obj) ? Qtrue : Qfalse;
3977 }
3978 
3979 /* GC */
3980 
3981 void
3982 rb_gc_force_recycle(VALUE p)
3983 {
3984     rb_objspace_t *objspace = &rb_objspace;
3985     struct heaps_slot *slot;
3986 
3987 #if USE_RGENGC
3988     CLEAR_IN_BITMAP(GET_HEAP_REMEMBERSET_BITS(p), p);
3989     CLEAR_IN_BITMAP(GET_HEAP_OLDGEN_BITS(p), p);
3990     if (!is_before_sweep(p)) {
3991         CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(p), p);
3992     }
3993 #endif
3994 
3995     objspace->total_freed_object_num++;
3996     if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(p), p)) {
3997         add_slot_local_freelist(objspace, (RVALUE *)p);
3998     }
3999     else {
4000         objspace->heap.free_num++;
4001         slot = add_slot_local_freelist(objspace, (RVALUE *)p);
4002         if (slot->free_next == NULL) {
4003             link_free_heap_slot(objspace, slot);
4004         }
4005     }
4006 }
4007 
4008 void
4009 rb_gc_register_mark_object(VALUE obj)
4010 {
4011     VALUE ary = GET_THREAD()->vm->mark_object_ary;
4012     rb_ary_push(ary, obj);
4013 }
4014 
4015 void
4016 rb_gc_register_address(VALUE *addr)
4017 {
4018     rb_objspace_t *objspace = &rb_objspace;
4019     struct gc_list *tmp;
4020 
4021     tmp = ALLOC(struct gc_list);
4022     tmp->next = global_List;
4023     tmp->varptr = addr;
4024     global_List = tmp;
4025 }
4026 
4027 void
4028 rb_gc_unregister_address(VALUE *addr)
4029 {
4030     rb_objspace_t *objspace = &rb_objspace;
4031     struct gc_list *tmp = global_List;
4032 
4033     if (tmp->varptr == addr) {
4034         global_List = tmp->next;
4035         xfree(tmp);
4036         return;
4037     }
4038     while (tmp->next) {
4039         if (tmp->next->varptr == addr) {
4040             struct gc_list *t = tmp->next;
4041 
4042             tmp->next = tmp->next->next;
4043             xfree(t);
4044             break;
4045         }
4046         tmp = tmp->next;
4047     }
4048 }
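/* [Editor's note] rb_gc_register_address() / rb_gc_unregister_address()
 * pin a global or static VALUE so the GC treats it as a root.  Typical
 * C-extension usage (a sketch; the names are hypothetical):
 *
 *     static VALUE cached_ary;
 *
 *     void
 *     Init_myext(void)
 *     {
 *         cached_ary = rb_ary_new();
 *         rb_gc_register_address(&cached_ary);  // survives all GCs
 *     }
 *
 * Without this, a VALUE held only in static storage is invisible to the
 * conservative stack scan and may be collected or moved out from under
 * the extension.
 */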
4049 
4050 #define GC_NOTIFY 0
4051 
4052 static int
4053 garbage_collect_body(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
4054 {
4055     if (ruby_gc_stress && !ruby_disable_gc_stress) {
4056         int flag = FIXNUM_P(ruby_gc_stress) ? FIX2INT(ruby_gc_stress) : 0;
4057 
4058         if (flag & 0x01)
4059             reason &= ~GPR_FLAG_MAJOR_MASK;
4060         else
4061             reason |= GPR_FLAG_MAJOR_BY_STRESS;
4062         immediate_sweep = !(flag & 0x02);
4063     }
4064 
4065 #if USE_RGENGC
4066     else {
4067         if (full_mark)
4068             reason |= GPR_FLAG_MAJOR_BY_NOFREE;
4069         if (objspace->rgengc.need_major_gc) {
4070             objspace->rgengc.need_major_gc = FALSE;
4071             reason |= GPR_FLAG_MAJOR_BY_RESCAN;
4072         }
4073         if (objspace->rgengc.remembered_shady_object_count > objspace->rgengc.remembered_shady_object_limit)
4074             reason |= GPR_FLAG_MAJOR_BY_SHADY;
4075         if (objspace->rgengc.oldgen_object_count > objspace->rgengc.oldgen_object_limit)
4076             reason |= GPR_FLAG_MAJOR_BY_OLDGEN;
4077 
4078         if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_lazy_sweep) {
4079             immediate_sweep = TRUE;
4080         }
4081     }
4082 #endif
4083 
4084     if (immediate_sweep)
4085         reason |= GPR_FLAG_IMMEDIATE_SWEEP;
4086 
4087     if (GC_NOTIFY) fprintf(stderr, "start garbage_collect(%d, %d, %d)\n", full_mark, immediate_sweep, reason);
4088 
4089     objspace->count++;
4090     gc_event_hook(objspace, RUBY_INTERNAL_EVENT_GC_START, 0 /* TODO: pass minor/immediate flag? */);
4091 
4092     objspace->profile.total_allocated_object_num_at_gc_start = objspace->total_allocated_object_num;
4093     objspace->profile.heaps_used_at_gc_start = heaps_used;
4094 
4095     gc_prof_setup_new_record(objspace, reason);
4096     gc_prof_timer_start(objspace);
4097     {
4098         assert(during_gc > 0);
4099         gc_marks(objspace, (reason & GPR_FLAG_MAJOR_MASK) ? FALSE : TRUE);
4100         gc_sweep(objspace, immediate_sweep);
4101         during_gc = 0;
4102     }
4103     gc_prof_timer_stop(objspace);
4104 
4105     if (GC_NOTIFY) fprintf(stderr, "end garbage_collect()\n");
4106     return TRUE;
4107 }
4108 
4109 static int
4110 garbage_collect(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
4111 {
4112     if (!heaps) {
4113         during_gc = 0;
4114         return FALSE;
4115     }
4116     if (!ready_to_gc(objspace)) {
4117         during_gc = 0;
4118         return TRUE;
4119     }
4120 
4121 #if GC_PROFILE_MORE_DETAIL
4122     objspace->profile.prepare_time = getrusage_time();
4123 #endif
4124     rest_sweep(objspace);
4125 #if GC_PROFILE_MORE_DETAIL
4126     objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
4127 #endif
4128 
4129     during_gc++;
4130 
4131     return garbage_collect_body(objspace, full_mark, immediate_sweep, reason);
4132 }
4133 
4134 struct objspace_and_reason {
4135     rb_objspace_t *objspace;
4136     int reason;
4137     int full_mark;
4138     int immediate_sweep;
4139 };
4140 
4141 static void *
4142 gc_with_gvl(void *ptr)
4143 {
4144     struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
4145     return (void *)(VALUE)garbage_collect(oar->objspace, oar->full_mark, oar->immediate_sweep, oar->reason);
4146 }
4147 
4148 static int
4149 garbage_collect_with_gvl(rb_objspace_t *objspace, int full_mark, int immediate_sweep, int reason)
4150 {
4151     if (dont_gc) return TRUE;
4152     if (ruby_thread_has_gvl_p()) {
4153         return garbage_collect(objspace, full_mark, immediate_sweep, reason);
4154     }
4155     else {
4156         if (ruby_native_thread_p()) {
4157             struct objspace_and_reason oar;
4158             oar.objspace = objspace;
4159             oar.reason = reason;
4160             oar.full_mark = full_mark;
4161             oar.immediate_sweep = immediate_sweep;
4162             return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
4163         }
4164         else {
4165             /* no ruby thread */
4166             fprintf(stderr, "[FATAL] failed to allocate memory\n");
4167             exit(EXIT_FAILURE);
4168         }
4169     }
4170 }
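/* [Editor's note] Allocation can fail on a thread that has released the
 * GVL (e.g. inside rb_thread_call_without_gvl).  Since the collector
 * mutates VM state, GC may only run while holding the GVL, so this
 * helper re-acquires it via rb_thread_call_with_gvl() before
 * collecting; a thread not registered with the VM at all cannot recover
 * here, and the process aborts.
 */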
4171 
4172 int
4173 rb_garbage_collect(void)
4174 {
4175     return garbage_collect(&rb_objspace, TRUE, TRUE, GPR_FLAG_CAPI);
4176 }
4177 
4178 #undef Init_stack
4179 
4180 void
4181 Init_stack(volatile VALUE *addr)
4182 {
4183     ruby_init_stack(addr);
4184 }
4185 
4186 /*
4187  *  call-seq:
4188  *     GC.start                     -> nil
4189  *     gc.garbage_collect           -> nil
4190  *     ObjectSpace.garbage_collect  -> nil
4191  *
4192  *  Initiates garbage collection, unless manually disabled.
4193  *
4194  */
4195 
4196 VALUE
4197 rb_gc_start(void)
4198 {
4199     rb_gc();
4200     return Qnil;
4201 }
4202 
4203 void
4204 rb_gc(void)
4205 {
4206     rb_objspace_t *objspace = &rb_objspace;
4207     garbage_collect(objspace, TRUE, TRUE, GPR_FLAG_METHOD);
4208     if (!finalizing) finalize_deferred(objspace);
4209     free_unused_heaps(objspace);
4210 }
4211 
4212 int
4213 rb_during_gc(void)
4214 {
4215     rb_objspace_t *objspace = &rb_objspace;
4216     return during_gc;
4217 }
4218 
4219 #if RGENGC_PROFILE >= 2
4220 static void
4221 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
4222 {
4223     VALUE result = rb_hash_new();
4224     int i;
4225     for (i=0; i<T_MASK; i++) {
4226         const char *type = type_name(i, 0);
4227         rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
4228     }
4229     rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
4230 }
4231 #endif
4232 
4233 size_t
4234 rb_gc_count(void)
4235 {
4236     return rb_objspace.count;
4237 }
4238 
4239 /*
4240  *  call-seq:
4241  *     GC.count -> Integer
4242  *
4243  *  The number of times GC occurred.
4244  *
4245  *  It returns the number of times GC occurred since the process started.
4246  *
4247  */
4248 
4249 static VALUE
4250 gc_count(VALUE self)
4251 {
4252     return SIZET2NUM(rb_gc_count());
4253 }
4254 
4255 /*
4256  *  call-seq:
4257  *     GC.stat -> Hash
4258  *
4259  *  Returns a Hash containing information about the GC.
4260  *
4261  *  The hash includes internal GC statistics, such as:
4262  *
4263  *      {
4264  *          :count=>0,
4265  *          :heap_used=>12,
4266  *          :heap_length=>12,
4267  *          :heap_increment=>0,
4268  *          :heap_live_num=>7539,
4269  *          :heap_free_num=>88,
4270  *          :heap_final_num=>0,
4271  *          :total_allocated_object=>7630,
4272  *          :total_freed_object=>88
4273  *      }
4274  *
4275  *  The contents of the hash are implementation specific and may be changed in
4276  *  the future.
4277  *
4278  *  This method is only expected to work on C Ruby.
4279  *
4280  */
4281 
4282 static VALUE
4283 gc_stat(int argc, VALUE *argv, VALUE self)
4284 {
4285     rb_objspace_t *objspace = &rb_objspace;
4286     VALUE hash;
4287     static VALUE sym_count;
4288     static VALUE sym_heap_used, sym_heap_length, sym_heap_increment;
4289     static VALUE sym_heap_live_num, sym_heap_free_num, sym_heap_final_num;
4290     static VALUE sym_total_allocated_object, sym_total_freed_object;
4291 #if USE_RGENGC
4292     static VALUE sym_minor_gc_count, sym_major_gc_count;
4293 #if RGENGC_PROFILE
4294     static VALUE sym_generated_normal_object_count, sym_generated_shady_object_count;
4295     static VALUE sym_shade_operation_count, sym_promote_operation_count;
4296     static VALUE sym_remembered_normal_object_count, sym_remembered_shady_object_count;
4297 #endif /* RGENGC_PROFILE */
4298 #endif /* USE_RGENGC */
4299 
4300     if (sym_count == 0) {
4301 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
4302         S(count);
4303         S(heap_used);
4304         S(heap_length);
4305         S(heap_increment);
4306         S(heap_live_num);
4307         S(heap_free_num);
4308         S(heap_final_num);
4309         S(total_allocated_object);
4310         S(total_freed_object);
4311 #if USE_RGENGC
4312         S(minor_gc_count);
4313         S(major_gc_count);
4314 #if RGENGC_PROFILE
4315         S(generated_normal_object_count);
4316         S(generated_shady_object_count);
4317         S(shade_operation_count);
4318         S(promote_operation_count);
4319         S(remembered_normal_object_count);
4320         S(remembered_shady_object_count);
4321 #endif /* RGENGC_PROFILE */
4322 #endif /* USE_RGENGC */
4323 #undef S
4324     }
4325 
4326     if (rb_scan_args(argc, argv, "01", &hash) == 1) {
4327         if (!RB_TYPE_P(hash, T_HASH)) {
4328             rb_raise(rb_eTypeError, "non-hash given");
4329         }
4330     }
4331 
4332     if (hash == Qnil) {
4333         hash = rb_hash_new();
4334     }
4335 
4336     rb_hash_aset(hash, sym_count, SIZET2NUM(objspace->count));
4337     /* implementation dependent counters */
4338     rb_hash_aset(hash, sym_heap_used, SIZET2NUM(objspace->heap.used));
4339     rb_hash_aset(hash, sym_heap_length, SIZET2NUM(objspace->heap.length));
4340     rb_hash_aset(hash, sym_heap_increment, SIZET2NUM(objspace->heap.increment));
4341     rb_hash_aset(hash, sym_heap_live_num, SIZET2NUM(objspace_live_num(objspace)));
4342     rb_hash_aset(hash, sym_heap_free_num, SIZET2NUM(objspace->heap.free_num));
4343     rb_hash_aset(hash, sym_heap_final_num, SIZET2NUM(objspace->heap.final_num));
4344     rb_hash_aset(hash, sym_total_allocated_object, SIZET2NUM(objspace->total_allocated_object_num));
4345     rb_hash_aset(hash, sym_total_freed_object, SIZET2NUM(objspace->total_freed_object_num));
4346 #if USE_RGENGC
4347     rb_hash_aset(hash, sym_minor_gc_count, SIZET2NUM(objspace->profile.minor_gc_count));
4348     rb_hash_aset(hash, sym_major_gc_count, SIZET2NUM(objspace->profile.major_gc_count));
4349 #if RGENGC_PROFILE
4350     rb_hash_aset(hash, sym_generated_normal_object_count, SIZET2NUM(objspace->profile.generated_normal_object_count));
4351     rb_hash_aset(hash, sym_generated_shady_object_count, SIZET2NUM(objspace->profile.generated_shady_object_count));
4352     rb_hash_aset(hash, sym_shade_operation_count, SIZET2NUM(objspace->profile.shade_operation_count));
4353     rb_hash_aset(hash, sym_promote_operation_count, SIZET2NUM(objspace->profile.promote_operation_count));
4354     rb_hash_aset(hash, sym_remembered_normal_object_count, SIZET2NUM(objspace->profile.remembered_normal_object_count));
4355     rb_hash_aset(hash, sym_remembered_shady_object_count, SIZET2NUM(objspace->profile.remembered_shady_object_count));
4356 #if RGENGC_PROFILE >= 2
4357     {
4358         gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
4359         gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
4360         gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
4361         gc_count_add_each_types(hash, "promote_operation_count_types", objspace->profile.promote_operation_count_types);
4362         gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
4363         gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
4364     }
4365 #endif
4366 #endif /* RGENGC_PROFILE */
4367 #endif /* USE_RGENGC */
4368 
4369     return hash;
4370 }
4371 
4372 /*
4373  *  call-seq:
4374  *    GC.stress     -> fixnum, true or false
4375  *
4376  *  Returns current status of GC stress mode.
4377  */
4378 
4379 static VALUE
4380 gc_stress_get(VALUE self)
4381 {
4382     rb_objspace_t *objspace = &rb_objspace;
4383     return ruby_gc_stress;
4384 }
4385 
4386 /*
4387  *  call-seq:
4388  *    GC.stress = bool          -> bool
4389  *
4390  *  Updates the GC stress mode.
4391  *
4392  *  When stress mode is enabled, the GC is invoked at every GC opportunity:
4393  *  all memory and object allocations.
4394  *
4395  *  Enabling stress mode will degrade performance; it is intended only for debugging.
4396  */
4397 
4398 static VALUE
4399 gc_stress_set(VALUE self, VALUE flag)
4400 {
4401     rb_objspace_t *objspace = &rb_objspace;
4402     rb_secure(2);
4403     ruby_gc_stress = FIXNUM_P(flag) ? flag : (RTEST(flag) ? Qtrue : Qfalse);
4404     return flag;
4405 }
4406 
4407 /*
4408  *  call-seq:
4409  *     GC.enable    -> true or false
4410  *
4411  *  Enables garbage collection, returning +true+ if garbage
4412  *  collection was previously disabled.
4413  *
4414  *     GC.disable   #=> false
4415  *     GC.enable    #=> true
4416  *     GC.enable    #=> false
4417  *
4418  */
4419 
4420 VALUE
4421 rb_gc_enable(void)
4422 {
4423     rb_objspace_t *objspace = &rb_objspace;
4424     int old = dont_gc;
4425 
4426     dont_gc = FALSE;
4427     return old ? Qtrue : Qfalse;
4428 }
4429 
4430 /*
4431  *  call-seq:
4432  *     GC.disable    -> true or false
4433  *
4434  *  Disables garbage collection, returning +true+ if garbage
4435  *  collection was already disabled.
4436  *
4437  *     GC.disable   #=> false
4438  *     GC.disable   #=> true
4439  *
4440  */
4441 
4442 VALUE
4443 rb_gc_disable(void)
4444 {
4445     rb_objspace_t *objspace = &rb_objspace;
4446     int old = dont_gc;
4447 
4448     dont_gc = TRUE;
4449     return old ? Qtrue : Qfalse;
4450 }
4451 
4452 void
4453 rb_gc_set_params(void)
4454 {
4455     char *malloc_limit_ptr, *heap_min_slots_ptr, *free_min_ptr, *growth_factor_ptr;
4456 
4457     if (rb_safe_level() > 0) return;
4458 
4459     malloc_limit_ptr = getenv("RUBY_GC_MALLOC_LIMIT");
4460     if (malloc_limit_ptr != NULL) {
4461         int malloc_limit_i = atoi(malloc_limit_ptr);
4462         if (RTEST(ruby_verbose))
4463             fprintf(stderr, "malloc_limit=%d (%d)\n",
4464                     malloc_limit_i, initial_malloc_limit);
4465         if (malloc_limit_i > 0) {
4466             initial_malloc_limit = malloc_limit_i;
4467         }
4468     }
4469 
4470     heap_min_slots_ptr = getenv("RUBY_HEAP_MIN_SLOTS");
4471     if (heap_min_slots_ptr != NULL) {
4472         int heap_min_slots_i = atoi(heap_min_slots_ptr);
4473         if (RTEST(ruby_verbose))
4474             fprintf(stderr, "heap_min_slots=%d (%d)\n",
4475                     heap_min_slots_i, initial_heap_min_slots);
4476         if (heap_min_slots_i > 0) {
4477             initial_heap_min_slots = heap_min_slots_i;
4478             initial_expand_heap(&rb_objspace);
4479         }
4480     }
4481 
4482     growth_factor_ptr = getenv("RUBY_HEAP_SLOTS_GROWTH_FACTOR");
4483     if (growth_factor_ptr != NULL) {
4484         double growth_factor_f = strtod(growth_factor_ptr, NULL);
4485         if (RTEST(ruby_verbose))
4486             fprintf(stderr, "heap_slots_growth_factor=%f (%f)\n",
4487                     growth_factor_f, initial_growth_factor);
4488         if (growth_factor_f > 1) {
4489             initial_growth_factor = growth_factor_f;
4490         }
4491     }
4492 
4493     free_min_ptr = getenv("RUBY_FREE_MIN");
4494     if (free_min_ptr != NULL) {
4495         int free_min_i = atoi(free_min_ptr);
4496         if (RTEST(ruby_verbose))
4497             fprintf(stderr, "free_min=%d (%d)\n", free_min_i, initial_free_min);
4498         if (free_min_i > 0) {
4499             initial_free_min = free_min_i;
4500         }
4501     }
4502 }
4503 
4504 void
4505 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4506 {
4507     rb_objspace_t *objspace = &rb_objspace;
4508 
4509     if (markable_object_p(objspace, obj)) {
4510         struct mark_func_data_struct mfd;
4511         mfd.mark_func = func;
4512         mfd.data = data;
4513         objspace->mark_func_data = &mfd;
4514         gc_mark_children(objspace, obj);
4515         objspace->mark_func_data = 0;
4516     }
4517 }
4518 
4519 /*
4520   ------------------------ Extended allocator ------------------------
4521 */
4522 
4523 static void vm_xfree(rb_objspace_t *objspace, void *ptr);
4524 
4525 static void *
4526 negative_size_allocation_error_with_gvl(void *ptr)
4527 {
4528     rb_raise(rb_eNoMemError, "%s", (const char *)ptr);
4529     return 0; /* should not be reached */
4530 }
4531 
4532 static void
4533 negative_size_allocation_error(const char *msg)
4534 {
4535     if (ruby_thread_has_gvl_p()) {
4536         rb_raise(rb_eNoMemError, "%s", msg);
4537     }
4538     else {
4539         if (ruby_native_thread_p()) {
4540             rb_thread_call_with_gvl(negative_size_allocation_error_with_gvl, (void *)msg);
4541         }
4542         else {
4543             fprintf(stderr, "[FATAL] %s\n", msg);
4544             exit(EXIT_FAILURE);
4545         }
4546     }
4547 }
4548 
4549 static void *
4550 ruby_memerror_body(void *dummy)
4551 {
4552     rb_memerror();
4553     return 0;
4554 }
4555 
4556 static void
4557 ruby_memerror(void)
4558 {
4559     if (ruby_thread_has_gvl_p()) {
4560         rb_memerror();
4561     }
4562     else {
4563         if (ruby_native_thread_p()) {
4564             rb_thread_call_with_gvl(ruby_memerror_body, 0);
4565         }
4566         else {
4567             /* no ruby thread */
4568             fprintf(stderr, "[FATAL] failed to allocate memory\n");
4569             exit(EXIT_FAILURE);
4570         }
4571     }
4572 }
4573 
4574 void
4575 rb_memerror(void)
4576 {
4577     rb_thread_t *th = GET_THREAD();
4578     if (!nomem_error ||
4579         (rb_thread_raised_p(th, RAISED_NOMEMORY) && rb_safe_level() < 4)) {
4580         fprintf(stderr, "[FATAL] failed to allocate memory\n");
4581         exit(EXIT_FAILURE);
4582     }
4583     if (rb_thread_raised_p(th, RAISED_NOMEMORY)) {
4584         rb_thread_raised_clear(th);
4585         GET_THREAD()->errinfo = nomem_error;
4586         JUMP_TAG(TAG_RAISE);
4587     }
4588     rb_thread_raised_set(th, RAISED_NOMEMORY);
4589     rb_exc_raise(nomem_error);
4590 }
4591 
4592 static void *
4593 aligned_malloc(size_t alignment, size_t size)
4594 {
4595     void *res;
4596 
4597 #if defined __MINGW32__
4598     res = __mingw_aligned_malloc(size, alignment);
4599 #elif defined _WIN32 && !defined __CYGWIN__
4600     void *_aligned_malloc(size_t, size_t);
4601     res = _aligned_malloc(size, alignment);
4602 #elif defined(HAVE_POSIX_MEMALIGN)
4603     if (posix_memalign(&res, alignment, size) == 0) {
4604         return res;
4605     }
4606     else {
4607         return NULL;
4608     }
4609 #elif defined(HAVE_MEMALIGN)
4610     res = memalign(alignment, size);
4611 #else
4612     char* aligned;
4613     res = malloc(alignment + size + sizeof(void*));
4614     aligned = (char*)res + alignment + sizeof(void*);
4615     aligned -= ((VALUE)aligned & (alignment - 1));
4616     ((void**)aligned)[-1] = res;
4617     res = (void*)aligned;
4618 #endif
4619 
4620 #if defined(_DEBUG) || defined(GC_DEBUG)
4621     /* alignment must be a power of 2 */
4622     assert(((alignment - 1) & alignment) == 0);
4623     assert(alignment % sizeof(void*) == 0);
4624 #endif
4625     return res;
4626 }
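/* [Editor's note] The portable fallback branch over-allocates by
 * alignment + sizeof(void*) and rounds the pointer down to an aligned
 * boundary, stashing the original malloc() result just below it so that
 * aligned_free() can recover it.  Worked example, assuming
 * alignment = 16, sizeof(void*) = 8, and malloc() returning 0x1003:
 *
 *     aligned  = 0x1003 + 16 + 8        = 0x101b
 *     aligned -= 0x101b & 15 (= 0xb)   -> 0x1010  (16-byte aligned)
 *     ((void**)0x1010)[-1] = 0x1003     // saved for free()
 */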
4627 
4628 static void
4629 aligned_free(void *ptr)
4630 {
4631 #if defined __MINGW32__
4632     __mingw_aligned_free(ptr);
4633 #elif defined _WIN32 && !defined __CYGWIN__
4634     _aligned_free(ptr);
4635 #elif defined(HAVE_MEMALIGN) || defined(HAVE_POSIX_MEMALIGN)
4636     free(ptr);
4637 #else
4638     free(((void**)ptr)[-1]);
4639 #endif
4640 }
4641 
4642 static inline size_t
4643 vm_malloc_prepare(rb_objspace_t *objspace, size_t size)
4644 {
4645     if ((ssize_t)size < 0) {
4646         negative_size_allocation_error("negative allocation size (or too big)");
4647     }
4648     if (size == 0) size = 1;
4649 
4650 #if CALC_EXACT_MALLOC_SIZE
4651     size += sizeof(size_t);
4652 #endif
4653 
4654     ATOMIC_SIZE_ADD(malloc_increase, size);
4655     if ((ruby_gc_stress && !ruby_disable_gc_stress) ||
4656         malloc_increase > malloc_limit) {
4657         garbage_collect_with_gvl(objspace, 0, 0, GPR_FLAG_MALLOC);
4658     }
4659 
4660     return size;
4661 }
4662 
4663 static inline void *
4664 vm_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
4665 {
4666 #if CALC_EXACT_MALLOC_SIZE
4667     ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size);
4668     ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
4669     ((size_t *)mem)[0] = size;
4670     mem = (size_t *)mem + 1;
4671 #endif
4672 
4673     return mem;
4674 }
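/* [Editor's note] Under CALC_EXACT_MALLOC_SIZE every allocation is
 * prefixed with a size_t header recording its size: vm_malloc_prepare()
 * grows the request by sizeof(size_t), vm_malloc_fixup() writes the
 * size and returns the address just past it, and vm_xfree() steps the
 * pointer back to read the header and update the counters.  Layout:
 *
 *     [ size_t size ][ user data ... ]
 *     ^ malloc()     ^ pointer handed to callers
 */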
4675 
4676 #define TRY_WITH_GC(alloc) do { \
4677         if (!(alloc) && \
4678             (!garbage_collect_with_gvl(objspace, 1, 1, GPR_FLAG_MALLOC) || /* full mark && immediate sweep */ \
4679              !(alloc))) { \
4680             ruby_memerror(); \
4681         } \
4682     } while (0)
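/* [Editor's note] TRY_WITH_GC encodes malloc's last-resort retry: if
 * the first allocation fails, run a full-mark, immediate-sweep GC to
 * release memory, then evaluate the same allocation expression once
 * more; only if that also fails is ruby_memerror() called.  Note the
 * macro evaluates its argument up to twice, so the argument must be a
 * repeatable expression such as (mem = malloc(size)).
 */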
4683 
4684 static void *
4685 vm_xmalloc(rb_objspace_t *objspace, size_t size)
4686 {
4687     void *mem;
4688 
4689     size = vm_malloc_prepare(objspace, size);
4690     TRY_WITH_GC(mem = malloc(size));
4691     return vm_malloc_fixup(objspace, mem, size);
4692 }
4693 
4694 static void *
4695 vm_xrealloc(rb_objspace_t *objspace, void *ptr, size_t size)
4696 {
4697     void *mem;
4698 #if CALC_EXACT_MALLOC_SIZE
4699     size_t oldsize;
4700 #endif
4701 
4702     if ((ssize_t)size < 0) {
4703         negative_size_allocation_error("negative re-allocation size");
4704     }
4705 
4706     if (!ptr) return vm_xmalloc(objspace, size);
4707 
4708     /*
4709      * The behavior of realloc(ptr, 0) is implementation-defined.
4710      * Therefore we do not use realloc(ptr, 0), for portability reasons.
4711      * See http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
4712      */
4713     if (size == 0) {
4714         vm_xfree(objspace, ptr);
4715         return 0;
4716     }
4717     if (ruby_gc_stress && !ruby_disable_gc_stress)
4718         garbage_collect_with_gvl(objspace, 0, 0, GPR_FLAG_MALLOC);
4719 
4720 #if CALC_EXACT_MALLOC_SIZE
4721     size += sizeof(size_t);
4722     ptr = (size_t *)ptr - 1;
4723     oldsize = ((size_t *)ptr)[0];
4724 #endif
4725 
4726     mem = realloc(ptr, size);
4727     if (!mem) {
4728         if (garbage_collect_with_gvl(objspace, 1, 1, GPR_FLAG_MALLOC)) {
4729             mem = realloc(ptr, size);
4730         }
4731         if (!mem) {
4732             ruby_memerror();
4733         }
4734     }
4735     ATOMIC_SIZE_ADD(malloc_increase, size);
4736 
4737 #if CALC_EXACT_MALLOC_SIZE
4738     ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, size - oldsize);
4739     ((size_t *)mem)[0] = size;
4740     mem = (size_t *)mem + 1;
4741 #endif
4742 
4743     return mem;
4744 }
4745 
4746 static void
4747 vm_xfree(rb_objspace_t *objspace, void *ptr)
4748 {
4749 #if CALC_EXACT_MALLOC_SIZE
4750     size_t size;
4751     ptr = ((size_t *)ptr) - 1;
4752     size = ((size_t*)ptr)[0];
4753     if (size) {
4754         ATOMIC_SIZE_SUB(objspace->malloc_params.allocated_size, size);
4755         ATOMIC_SIZE_DEC(objspace->malloc_params.allocations);
4756     }
4757 #endif
4758 
4759     free(ptr);
4760 }
4761 
4762 void *
4763 ruby_xmalloc(size_t size)
4764 {
4765     return vm_xmalloc(&rb_objspace, size);
4766 }
4767 
4768 static inline size_t
4769 xmalloc2_size(size_t n, size_t size)
4770 {
4771     size_t len = size * n;
4772     if (n != 0 && size != len / n) {
4773         rb_raise(rb_eArgError, "malloc: possible integer overflow");
4774     }
4775     return len;
4776 }
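/* [Editor's note] The check above detects multiplication overflow
 * without needing a wider integer type: if n * size wrapped, dividing
 * the truncated product by n cannot give back size.  For example, with
 * 32-bit size_t, n = 0x10000 and size = 0x10001 yield
 * len = 0x10000 (mod 2^32), and len / n = 1 != size, so the overflow is
 * caught and raised as an ArgumentError instead of silently
 * under-allocating.
 */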
4777 
4778 void *
4779 ruby_xmalloc2(size_t n, size_t size)
4780 {
4781     return vm_xmalloc(&rb_objspace, xmalloc2_size(n, size));
4782 }
4783 
4784 static void *
4785 vm_xcalloc(rb_objspace_t *objspace, size_t count, size_t elsize)
4786 {
4787     void *mem;
4788     size_t size;
4789 
4790     size = xmalloc2_size(count, elsize);
4791     size = vm_malloc_prepare(objspace, size);
4792 
4793     TRY_WITH_GC(mem = calloc(1, size));
4794     return vm_malloc_fixup(objspace, mem, size);
4795 }
4796 
4797 void *
4798 ruby_xcalloc(size_t n, size_t size)
4799 {
4800     return vm_xcalloc(&rb_objspace, n, size);
4801 }
4802 
4803 void *
4804 ruby_xrealloc(void *ptr, size_t size)
4805 {
4806     return vm_xrealloc(&rb_objspace, ptr, size);
4807 }
4808 
4809 void *
4810 ruby_xrealloc2(void *ptr, size_t n, size_t size)
4811 {
4812     size_t len = size * n;
4813     if (n != 0 && size != len / n) {
4814         rb_raise(rb_eArgError, "realloc: possible integer overflow");
4815     }
4816     return ruby_xrealloc(ptr, len);
4817 }
4818 
4819 void
4820 ruby_xfree(void *x)
4821 {
4822     if (x)
4823         vm_xfree(&rb_objspace, x);
4824 }
4825 
4826 
4827 /* Mimics ruby_xmalloc, but does not require rb_objspace.
4828  * Returns a pointer suitable for ruby_xfree.
4829  */
4830 void *
4831 ruby_mimmalloc(size_t size)
4832 {
4833     void *mem;
4834 #if CALC_EXACT_MALLOC_SIZE
4835     size += sizeof(size_t);
4836 #endif
4837     mem = malloc(size);
4838 #if CALC_EXACT_MALLOC_SIZE
4839     /* set to 0 for consistency of allocated_size/allocations */
4840     ((size_t *)mem)[0] = 0;
4841     mem = (size_t *)mem + 1;
4842 #endif
4843     return mem;
4844 }
4845 
4846 #if CALC_EXACT_MALLOC_SIZE
4847 /*
4848  *  call-seq:
4849  *     GC.malloc_allocated_size -> Integer
4850  *
4851  *  Returns the size of memory allocated by malloc().
4852  *
4853  *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
4854  */
4855 
4856 static VALUE
4857 gc_malloc_allocated_size(VALUE self)
4858 {
4859     return UINT2NUM(rb_objspace.malloc_params.allocated_size);
4860 }
4861 
4862 /*
4863  *  call-seq:
4864  *     GC.malloc_allocations -> Integer
4865  *
4866  *  Returns the number of malloc() allocations.
4867  *
4868  *  Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
4869  */
4870 
4871 static VALUE
4872 gc_malloc_allocations(VALUE self)
4873 {
4874     return UINT2NUM(rb_objspace.malloc_params.allocations);
4875 }
4876 #endif
4877 
4878 /*
4879   ------------------------------ WeakMap ------------------------------
4880 */
4881 
4882 struct weakmap {
4883     st_table *obj2wmap;         /* obj -> [ref,...] */
4884     st_table *wmap2obj;         /* ref -> obj */
4885     VALUE final;
4886 };
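/* [Editor's note] The two tables form an invertible index: wmap2obj
 * maps each weak ref to the object it refers to (used by lookups such
 * as wmap_aref), while obj2wmap maps an object back to the array of
 * refs pointing at it.  Both sides are given the wmap_finalize
 * finalizer, so when either object is collected its entries can be
 * purged from both tables by object id.
 */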
4887 
4888 static int
4889 wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
4890 {
4891     gc_mark_ptr((rb_objspace_t *)arg, (VALUE)val);
4892     return ST_CONTINUE;
4893 }
4894 
4895 static void
4896 wmap_mark(void *ptr)
4897 {
4898     struct weakmap *w = ptr;
4899     if (w->obj2wmap) st_foreach(w->obj2wmap, wmap_mark_map, (st_data_t)&rb_objspace);
4900     rb_gc_mark(w->final);
4901 }
4902 
4903 static int
4904 wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
4905 {
4906     rb_ary_resize((VALUE)val, 0);
4907     return ST_CONTINUE;
4908 }
4909 
4910 static void
4911 wmap_free(void *ptr)
4912 {
4913     struct weakmap *w = ptr;
4914     st_foreach(w->obj2wmap, wmap_free_map, 0);
4915     st_free_table(w->obj2wmap);
4916     st_free_table(w->wmap2obj);
4917 }
4918 
4919 size_t rb_ary_memsize(VALUE ary);
4920 static int
4921 wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
4922 {
4923     *(size_t *)arg += rb_ary_memsize((VALUE)val);
4924     return ST_CONTINUE;
4925 }
4926 
4927 static size_t
4928 wmap_memsize(const void *ptr)
4929 {
4930     size_t size;
4931     const struct weakmap *w = ptr;
4932     if (!w) return 0;
4933     size = sizeof(*w);
4934     size += st_memsize(w->obj2wmap);
4935     size += st_memsize(w->wmap2obj);
4936     st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
4937     return size;
4938 }
4939 
4940 static const rb_data_type_t weakmap_type = {
4941     "weakmap",
4942     {
4943         wmap_mark,
4944         wmap_free,
4945         wmap_memsize,
4946     }
4947 };
4948 
4949 static VALUE
4950 wmap_allocate(VALUE klass)
4951 {
4952     struct weakmap *w;
4953     VALUE obj = TypedData_Make_Struct(klass, struct weakmap, &weakmap_type, w);
4954     w->obj2wmap = st_init_numtable();
4955     w->wmap2obj = st_init_numtable();
4956     w->final = rb_obj_method(obj, ID2SYM(rb_intern("finalize")));
4957     return obj;
4958 }
4959 
4960 static int
4961 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
4962 {
4963     VALUE wmap, ary;
4964     if (!existing) return ST_STOP;
4965     wmap = (VALUE)arg, ary = (VALUE)*value;
4966     rb_ary_delete_same(ary, wmap);
4967     if (!RARRAY_LEN(ary)) return ST_DELETE;
4968     return ST_CONTINUE;
4969 }
4970 
4971 static VALUE
4972 wmap_finalize(VALUE self, VALUE objid)
4973 {
4974     st_data_t orig, wmap, data;
4975     VALUE obj, rids;
4976     long i;
4977     struct weakmap *w;
4978 
4979     TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
4980     /* Get reference from object id. */
4981     obj = obj_id_to_ref(objid);
4982 
4983     /* obj is original referenced object and/or weak reference. */
4984     orig = (st_data_t)obj;
4985     if (st_delete(w->obj2wmap, &orig, &data)) {
4986         rids = (VALUE)data;
4987         for (i = 0; i < RARRAY_LEN(rids); ++i) {
4988             wmap = (st_data_t)RARRAY_AREF(rids, i);
4989             st_delete(w->wmap2obj, &wmap, NULL);
4990         }
4991     }
4992 
4993     wmap = (st_data_t)obj;
4994     if (st_delete(w->wmap2obj, &wmap, &orig)) {
4995         wmap = (st_data_t)obj;
4996         st_update(w->obj2wmap, orig, wmap_final_func, wmap);
4997     }
4998     return self;
4999 }
5000 
5001 /* Creates a weak reference from the given key to the given value */
5002 static VALUE
5003 wmap_aset(VALUE self, VALUE wmap, VALUE orig)
5004 {
5005     st_data_t data;
5006     VALUE rids;
5007     struct weakmap *w;
5008 
5009     TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
5010     rb_define_final(orig, w->final);
5011     rb_define_final(wmap, w->final);
5012     if (st_lookup(w->obj2wmap, (st_data_t)orig, &data)) {
5013         rids = (VALUE)data;
5014     }
5015     else {
5016         rids = rb_ary_tmp_new(1);
5017         st_insert(w->obj2wmap, (st_data_t)orig, (st_data_t)rids);
5018     }
5019     rb_ary_push(rids, wmap);
5020     st_insert(w->wmap2obj, (st_data_t)wmap, (st_data_t)orig);
5021     return nonspecial_obj_id(orig);
5022 }
5023 
5024 /* Retrieves a weakly referenced object with the given key */
5025 static VALUE
5026 wmap_aref(VALUE self, VALUE wmap)
5027 {
5028     st_data_t data;
5029     VALUE obj;
5030     struct weakmap *w;
5031     rb_objspace_t *objspace = &rb_objspace;
5032 
5033     TypedData_Get_Struct(self, struct weakmap, &weakmap_type, w);
5034     if (!st_lookup(w->wmap2obj, (st_data_t)wmap, &data)) return Qnil;
5035     obj = (VALUE)data;
5036     if (!is_id_value(objspace, obj)) return Qnil;
5037     if (!is_live_object(objspace, obj)) return Qnil;
5038     return obj;
5039 }
5040 
5041 
5042 /*
5043   ------------------------------ GC profiler ------------------------------
5044 */
5045 
5046 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
5047 
5048 static double
5049 getrusage_time(void)
5050 {
5051 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
5052     struct timespec ts;
5053 
5054     if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
5055         return ts.tv_sec + ts.tv_nsec * 1e-9;
5056     }
5057     return 0.0;
5058 #elif defined RUSAGE_SELF
5059     struct rusage usage;
5060     struct timeval time;
5061     getrusage(RUSAGE_SELF, &usage);
5062     time = usage.ru_utime;
5063     return time.tv_sec + time.tv_usec * 1e-6;
5064 #elif defined _WIN32
5065     FILETIME creation_time, exit_time, kernel_time, user_time;
5066     ULARGE_INTEGER ui;
5067     LONG_LONG q;
5068     double t;
5069 
5070     if (GetProcessTimes(GetCurrentProcess(),
5071                         &creation_time, &exit_time, &kernel_time, &user_time) == 0)
5072     {
5073         return 0.0;
5074     }
5075     memcpy(&ui, &user_time, sizeof(FILETIME));
5076     q = ui.QuadPart / 10L;
5077     t = (DWORD)(q % 1000000L) * 1e-6;
5078     q /= 1000000L;
5079 #ifdef __GNUC__
5080     t += q;
5081 #else
5082     t += (double)(DWORD)(q >> 16) * (1 << 16);
5083     t += (DWORD)q & ~(~0 << 16);
5084 #endif
5085     return t;
5086 #else
5087     return 0.0;
5088 #endif
5089 }
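/* [Editor's note] On Windows, FILETIME counts 100-nanosecond ticks, so
 * q = QuadPart / 10 converts the user time to microseconds; the
 * remainder modulo 1000000 becomes the fractional seconds and the
 * quotient the whole seconds.  The non-__GNUC__ path rebuilds the
 * double from 16-bit chunks, apparently to work around older compilers'
 * weak 64-bit-integer-to-double conversion.
 */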
5090 
5091 static inline void
5092 gc_prof_setup_new_record(rb_objspace_t *objspace, int reason)
5093 {
5094     if (objspace->profile.run) {
5095         size_t index = objspace->profile.next_index;
5096         gc_profile_record *record;
5097 
5098         /* create new record */
5099         objspace->profile.next_index++;
5100 
5101         if (!objspace->profile.records) {
5102             objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
5103             objspace->profile.records = malloc(sizeof(gc_profile_record) * objspace->profile.size);
5104         }
5105         if (index >= objspace->profile.size) {
5106             objspace->profile.size += 1000;
5107             objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
5108         }
5109         if (!objspace->profile.records) {
5110             rb_bug("gc_profile malloc or realloc failed");
5111         }
5112         record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
5113         MEMZERO(record, gc_profile_record, 1);
5114 
5115         /* setup before-GC parameter */
5116         record->flags = reason | ((ruby_gc_stress && !ruby_disable_gc_stress) ? GPR_FLAG_STRESS : 0);
5117 #if CALC_EXACT_MALLOC_SIZE
5118         record->allocated_size = malloc_allocated_size;
5119 #endif
5120     }
5121 }
5122 
5123 static inline void
5124 gc_prof_timer_start(rb_objspace_t *objspace)
5125 {
5126     if (objspace->profile.run) {
5127         gc_profile_record *record = gc_prof_record(objspace);
5128 #if GC_PROFILE_MORE_DETAIL
5129         record->prepare_time = objspace->profile.prepare_time;
5130 #endif
5131         record->gc_time = 0;
5132         record->gc_invoke_time = getrusage_time();
5133     }
5134 }
5135 
5136 static double
5137 elapsed_time_from(double time)
5138 {
5139     double now = getrusage_time();
5140     if (now > time) {
5141         return now - time;
5142     }
5143     else {
5144         return 0;
5145     }
5146 }
5147 
5148 static inline void
5149 gc_prof_timer_stop(rb_objspace_t *objspace)
5150 {
5151     if (objspace->profile.run) {
5152         gc_profile_record *record = gc_prof_record(objspace);
5153         record->gc_time = elapsed_time_from(record->gc_invoke_time);
5154         record->gc_invoke_time -= objspace->profile.invoke_time;
5155     }
5156 }
5157 
5158 static inline void
5159 gc_prof_mark_timer_start(rb_objspace_t *objspace)
5160 {
5161     if (RUBY_DTRACE_GC_MARK_BEGIN_ENABLED()) {
5162         RUBY_DTRACE_GC_MARK_BEGIN();
5163     }
5164 #if GC_PROFILE_MORE_DETAIL
5165     if (objspace->profile.run) {
5166         gc_prof_record(objspace)->gc_mark_time = getrusage_time();
5167     }
5168 #endif
5169 }
5170 
5171 static inline void
5172 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
5173 {
5174     if (RUBY_DTRACE_GC_MARK_END_ENABLED()) {
5175         RUBY_DTRACE_GC_MARK_END();
5176     }
5177 #if GC_PROFILE_MORE_DETAIL
5178     if (objspace->profile.run) {
5179         gc_profile_record *record = gc_prof_record(objspace);
5180         record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
5181     }
5182 #endif
5183 }
5184 
5185 static inline void
5186 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
5187 {
5188     if (RUBY_DTRACE_GC_SWEEP_BEGIN_ENABLED()) {
5189         RUBY_DTRACE_GC_SWEEP_BEGIN();
5190     }
5191     if (objspace->profile.run) {
5192         gc_profile_record *record = gc_prof_record(objspace);
5193 
5194         if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
5195             objspace->profile.gc_sweep_start_time = getrusage_time();
5196         }
5197     }
5198 }
5199 
5200 static inline void
5201 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
5202 {
5203     if (RUBY_DTRACE_GC_SWEEP_END_ENABLED()) {
5204         RUBY_DTRACE_GC_SWEEP_END();
5205     }
5206 
5207     if (objspace->profile.run) {
5208         double sweep_time;
5209         gc_profile_record *record = gc_prof_record(objspace);
5210 
5211         if (record->gc_time > 0) {
5212             sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
5213             /* need to accumulate GC time for lazy sweep after gc() */
5214             record->gc_time += sweep_time;
5215         }
5216         else if (GC_PROFILE_MORE_DETAIL) {
5217             sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
5218         }
5219 
5220 #if GC_PROFILE_MORE_DETAIL
5221         record->gc_sweep_time += sweep_time;
5222         if (deferred_final_list) record->flags |= GPR_FLAG_HAVE_FINALIZE;
5223 #endif
5224     }
5225 }
5226 
5227 static inline void
5228 gc_prof_set_malloc_info(rb_objspace_t *objspace)
5229 {
5230 #if GC_PROFILE_MORE_DETAIL
5231     if (objspace->profile.run) {
5232         gc_profile_record *record = gc_prof_record(objspace);
5233         record->allocate_increase = malloc_increase + malloc_increase2;
5234         record->allocate_limit = malloc_limit;
5235     }
5236 #endif
5237 }
5238 
5239 static inline void
5240 gc_prof_set_heap_info(rb_objspace_t *objspace)
5241 {
5242     if (objspace->profile.run) {
5243         gc_profile_record *record = gc_prof_record(objspace);
5244         size_t live = objspace->profile.total_allocated_object_num_at_gc_start - objspace->total_freed_object_num;
5245         size_t total = objspace->profile.heaps_used_at_gc_start * HEAP_OBJ_LIMIT;
5246 
5247 #if GC_PROFILE_MORE_DETAIL
5248         record->heap_use_slots = objspace->profile.heaps_used_at_gc_start;
5249         record->heap_live_objects = live;
5250         record->heap_free_objects = total - live;
5251 #endif
5252 
5253         record->heap_total_objects = total;
5254         record->heap_use_size = live * sizeof(RVALUE);
5255         record->heap_total_size = total * sizeof(RVALUE);
5256     }
5257 }
5258 
5259 /*
5260  *  call-seq:
5261  *    GC::Profiler.clear          -> nil
5262  *
5263  *  Clears the GC profiler data.
5264  *
5265  */
5266 
5267 static VALUE
5268 gc_profile_clear(void)
5269 {
5270     rb_objspace_t *objspace = &rb_objspace;
5271 
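         /* Shrink a record buffer that has grown past twice the default
          * size; otherwise just zero the existing records in place. */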
5272     if (GC_PROFILE_RECORD_DEFAULT_SIZE * 2 < objspace->profile.size) {
5273         objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE * 2;
5274         objspace->profile.records = realloc(objspace->profile.records, sizeof(gc_profile_record) * objspace->profile.size);
5275         if (!objspace->profile.records) {
5276             rb_memerror();
5277         }
5278     }
5279     MEMZERO(objspace->profile.records, gc_profile_record, objspace->profile.size);
5280     objspace->profile.next_index = 0;
5281     objspace->profile.current_record = 0;
5282     return Qnil;
5283 }
5284 
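     /*
      * Translates a profile record's flag bits into the array of symbols
      * exposed as :GC_FLAGS by GC::Profiler.raw_data, e.g. (illustrative)
      * [:minor_gc, :CAUSED_BY_NEWOBJ].
      */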
5285 static VALUE
5286 gc_profile_flags(int flags)
5287 {
5288     VALUE result = rb_ary_new();
5289     rb_ary_push(result, ID2SYM(rb_intern(flags & GPR_FLAG_MAJOR_MASK ? "major_gc" : "minor_gc")));
5290     if (flags & GPR_FLAG_HAVE_FINALIZE) rb_ary_push(result, ID2SYM(rb_intern("HAVE_FINALIZE")));
5291     if (flags & GPR_FLAG_NEWOBJ)        rb_ary_push(result, ID2SYM(rb_intern("CAUSED_BY_NEWOBJ")));
5292     if (flags & GPR_FLAG_MALLOC)        rb_ary_push(result, ID2SYM(rb_intern("CAUSED_BY_MALLOC")));
5293     if (flags & GPR_FLAG_METHOD)        rb_ary_push(result, ID2SYM(rb_intern("CAUSED_BY_METHOD")));
5294     if (flags & GPR_FLAG_STRESS)        rb_ary_push(result, ID2SYM(rb_intern("CAUSED_BY_STRESS")));
5295     return result;
5296 }
5297 
5298 /*
5299  *  call-seq:
5300  *     GC::Profiler.raw_data    -> [Hash, ...]
5301  *
5302  *  Returns an Array of individual raw profile data Hashes ordered
5303  *  from earliest to latest by +:GC_INVOKE_TIME+.
5304  *
5305  *  For example:
5306  *
5307  *    [
5308  *      {
5309  *         :GC_TIME=>1.3000000000000858e-05,
5310  *         :GC_INVOKE_TIME=>0.010634999999999999,
5311  *         :HEAP_USE_SIZE=>289640,
5312  *         :HEAP_TOTAL_SIZE=>588960,
5313  *         :HEAP_TOTAL_OBJECTS=>14724,
5314  *         :GC_IS_MARKED=>true,
5315  *      },
5316  *      # ...
5317  *    ]
5318  *
5319  *  The keys mean:
5320  *
5321  *  +:GC_TIME+::
5322  *      Time elapsed in seconds for this GC run
5323  *  +:GC_INVOKE_TIME+::
5324  *      Time elapsed in seconds from startup to when the GC was invoked
5325  *  +:HEAP_USE_SIZE+::
5326  *      Total bytes of heap used
5327  *  +:HEAP_TOTAL_SIZE+::
5328  *      Total size of heap in bytes
5329  *  +:HEAP_TOTAL_OBJECTS+::
5330  *      Total number of objects
5331  *  +:GC_IS_MARKED+::
5332  *      Whether the GC was in its mark phase (this version always reports +true+)
5333  *
5334  *  If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
5335  *  to the following hash keys:
5336  *
5337  *  +:GC_MARK_TIME+:: Time spent in the mark phase, in seconds
5338  *  +:GC_SWEEP_TIME+:: Time spent in the sweep phase, in seconds
5339  *  +:ALLOCATE_INCREASE+:: Bytes allocated via malloc since the previous GC
5340  *  +:ALLOCATE_LIMIT+:: Malloc threshold (in bytes) that triggers the next GC
5341  *  +:HEAP_USE_SLOTS+:: Number of heaps in use when the GC started
5342  *  +:HEAP_LIVE_OBJECTS+:: Number of live objects
5343  *  +:HEAP_FREE_OBJECTS+:: Number of free object slots
5344  *  +:HAVE_FINALIZE+:: Whether a deferred finalizer list remained after the sweep
5345  *
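      *  For example, the detailed mark times can be totalled with a one-liner
      *  such as (a sketch; requires a +GC_PROFILE_MORE_DETAIL+ build):
      *
      *     GC::Profiler.raw_data.map {|r| r[:GC_MARK_TIME] }.inject(0.0, :+)
      *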
5346  */
5347 
5348 static VALUE
5349 gc_profile_record_get(void)
5350 {
5351     VALUE prof;
5352     VALUE gc_profile = rb_ary_new();
5353     size_t i;
5354     rb_objspace_t *objspace = &rb_objspace;
5355 
5356     if (!objspace->profile.run) {
5357         return Qnil;
5358     }
5359 
5360     for (i = 0; i < objspace->profile.next_index; i++) {
5361         gc_profile_record *record = &objspace->profile.records[i];
5362 
5363         prof = rb_hash_new();
5364         rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_profile_flags(record->flags));
5365         rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
5366         rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
5367         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
5368         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
5369         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
5370         rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue); /* hardcoded; this version always reports true */
5371 #if GC_PROFILE_MORE_DETAIL
5372         rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
5373         rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
5374         rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
5375         rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
5376         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SLOTS")), SIZET2NUM(record->heap_use_slots));
5377         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
5378         rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
5379 
5380         rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
5381         rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
5382 
5383         rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
5384 #endif
5385 
5386 #if RGENGC_PROFILE > 0
5387         rb_hash_aset(prof, ID2SYM(rb_intern("OLDGEN_OBJECTS")), SIZET2NUM(record->oldgen_objects));
5388         rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
5389         rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
5390 #endif
5391         rb_ary_push(gc_profile, prof);
5392     }
5393 
5394     return gc_profile;
5395 }
5396 
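     /*
      * Renders every accumulated record as the human-readable table behind
      * GC::Profiler.result and GC::Profiler.report; "append" abstracts over
      * appending to a String (rb_str_buf_append) or writing to an IO
      * (rb_io_write).
      */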
5397 static void
5398 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
5399 {
5400     rb_objspace_t *objspace = &rb_objspace;
5401     size_t count = objspace->profile.next_index;
5402 
5403     if (objspace->profile.run && count) {
5404         size_t i;
5405         const gc_profile_record *record;
5406 
5407         append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->count));
5408         append(out, rb_str_new_cstr("Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)\n"));
5409 
5410         for (i = 0; i < count; i++) {
5411             record = &objspace->profile.records[i];
5412             append(out, rb_sprintf("%5"PRIdSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
5413                                    i+1, record->gc_invoke_time, record->heap_use_size,
5414                                    record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
5415         }
5416 
5417 #if GC_PROFILE_MORE_DETAIL
5418         append(out, rb_str_new_cstr("\n\n" \
5419                                     "More detail.\n" \
5420                                     "Prepare Time = previous GC's remaining (lazy) sweep time\n"
5421                                     "Index Flags       Allocate Inc.  Allocate Limit"
5422 #if CALC_EXACT_MALLOC_SIZE
5423                                     "  Allocated Size"
5424 #endif
5425                                     "  Use Slot     Mark Time(ms)    Sweep Time(ms)  Prepare Time(ms)  LivingObj    FreeObj RemovedObj   EmptyObj"
5426 #if RGENGC_PROFILE
5427                                     " OldgenObj RemNormObj RemShadObj"
5428 #endif
5429                                     "\n"));
5430 
5431         for (i = 0; i < count; i++) {
5432             record = &objspace->profile.records[i];
5433             append(out, rb_sprintf("%5"PRIdSIZE" %c/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
5434 #if CALC_EXACT_MALLOC_SIZE
5435                                    " %15"PRIuSIZE
5436 #endif
5437                                    " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
5438 #if RGENGC_PROFILE
5439                                    "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
5440 #endif
5441                                    "\n",
5442                                    i+1,
5443                                    "-+O3S567R9abcdef!"[record->flags & GPR_FLAG_MAJOR_MASK], /* Stress,Rescan,Shady,Oldgen,NoFree */
5444                                    (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
5445                                    (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
5446                                    (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
5447                                    (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
5448                                    (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
5449                                    (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
5450                                    record->allocate_increase, record->allocate_limit,
5451 #if CALC_EXACT_MALLOC_SIZE
5452                                    record->allocated_size,
5453 #endif
5454                                    record->heap_use_slots,
5455                                    record->gc_mark_time*1000,
5456                                    record->gc_sweep_time*1000,
5457                                    record->prepare_time*1000,
5458 
5459                                    record->heap_live_objects,
5460                                    record->heap_free_objects,
5461                                    record->removing_objects,
5462                                    record->empty_objects
5463 #if RGENGC_PROFILE
5464                                    ,
5465                                    record->oldgen_objects,
5466                                    record->remembered_normal_objects,
5467                                    record->remembered_shady_objects
5468 #endif
5469                        ));
5470         }
5471 #endif
5472     }
5473 }
5474 
5475 /*
5476  *  call-seq:
5477  *     GC::Profiler.result  -> String
5478  *
5479  *  Returns a profile data report such as:
5480  *
5481  *    GC 1 invokes.
5482  *    Index    Invoke Time(sec)       Use Size(byte)     Total Size(byte)         Total Object                    GC Time(ms)
5483  *        1               0.012               159240               212940                10647         0.00000000000001530000
5484  */
5485 
5486 static VALUE
5487 gc_profile_result(void)
5488 {
5489     VALUE str = rb_str_buf_new(0);
5490     gc_profile_dump_on(str, rb_str_buf_append);
5491     return str;
5492 }
5493 
5494 /*
5495  *  call-seq:
5496  *     GC::Profiler.report
5497  *     GC::Profiler.report(io)
5498  *
5499  *  Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
5500  *
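      *  A short sketch (+gc.log+ is an arbitrary file name):
      *
      *     GC::Profiler.enable
      *     GC.start
      *     GC::Profiler.report                                # to $stdout
      *     File.open("gc.log", "w") {|f| GC::Profiler.report(f) }
      *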
5501  */
5502 
5503 static VALUE
5504 gc_profile_report(int argc, VALUE *argv, VALUE self)
5505 {
5506     VALUE out;
5507 
5508     if (argc == 0) {
5509         out = rb_stdout;
5510     }
5511     else {
5512         rb_scan_args(argc, argv, "01", &out);
5513     }
5514     gc_profile_dump_on(out, rb_io_write);
5515 
5516     return Qnil;
5517 }
5518 
5519 /*
5520  *  call-seq:
5521  *     GC::Profiler.total_time  -> float
5522  *
5523  *  The total time used for garbage collection in seconds
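      *
      *  For example (illustrative value):
      *
      *     GC::Profiler.enable
      *     GC.start
      *     GC::Profiler.total_time   #=> 0.0001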
5524  */
5525 
5526 static VALUE
5527 gc_profile_total_time(VALUE self)
5528 {
5529     double time = 0;
5530     rb_objspace_t *objspace = &rb_objspace;
5531 
5532     if (objspace->profile.run && objspace->profile.next_index > 0) {
5533         size_t i;
5534         size_t count = objspace->profile.next_index; /* sum every completed record */
5535 
5536         for (i = 0; i < count; i++) {
5537             time += objspace->profile.records[i].gc_time;
5538         }
5539     }
5540     return DBL2NUM(time);
5541 }
5542 
5543 /*
5544  *  call-seq:
5545  *    GC::Profiler.enabled?     -> true or false
5546  *
5547  *  The current status of GC profile mode.
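      *
      *     GC::Profiler.enabled?   #=> false
      *     GC::Profiler.enable
      *     GC::Profiler.enabled?   #=> true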
5548  */
5549 
5550 static VALUE
5551 gc_profile_enable_get(VALUE self)
5552 {
5553     rb_objspace_t *objspace = &rb_objspace;
5554     return objspace->profile.run ? Qtrue : Qfalse;
5555 }
5556 
5557 /*
5558  *  call-seq:
5559  *    GC::Profiler.enable       -> nil
5560  *
5561  *  Starts the GC profiler.
5562  *
5563  */
5564 
5565 static VALUE
5566 gc_profile_enable(void)
5567 {
5568     rb_objspace_t *objspace = &rb_objspace;
5569     rest_sweep(objspace); /* finish any in-progress lazy sweep before profiling begins */
5570     objspace->profile.run = TRUE;
5571     return Qnil;
5572 }
5573 
5574 /*
5575  *  call-seq:
5576  *    GC::Profiler.disable      -> nil
5577  *
5578  *  Stops the GC profiler.
5579  *
5580  */
5581 
5582 static VALUE
5583 gc_profile_disable(void)
5584 {
5585     rb_objspace_t *objspace = &rb_objspace;
5586 
5587     objspace->profile.run = FALSE;
5588     objspace->profile.current_record = 0;
5589     return Qnil;
5590 }
5591 
5592 #ifdef GC_DEBUG
5593 
5594 /*
5595   ------------------------------ DEBUG ------------------------------
5596 */
5597 
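     /*
      * Dumps obj's GC state to stderr: whether it points into the heap,
      * whether it is marked, and, during lazy sweep, whether its slot has
      * been swept yet.
      */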
5598 void
5599 rb_gcdebug_print_obj_condition(VALUE obj)
5600 {
5601     rb_objspace_t *objspace = &rb_objspace;
5602 
5603     if (is_pointer_to_heap(objspace, (void *)obj)) {
5604         fprintf(stderr, "pointer to heap?: true\n");
5605     }
5606     else {
5607         fprintf(stderr, "pointer to heap?: false\n");
5608         return;
5609     }
5610     fprintf(stderr, "marked?: %s\n",
5611             MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
5612     if (is_lazy_sweeping(objspace)) {
5613         fprintf(stderr, "lazy sweeping?: true\n");
5614         fprintf(stderr, "swept?: %s\n",
5615                 is_swept_object(objspace, obj) ? "done" : "not yet");
5616     }
5617     else {
5618         fprintf(stderr, "lazy sweeping?: false\n");
5619     }
5620 }
5621 
5622 static VALUE
5623 gcdebug_sentinel(VALUE obj, VALUE name)
5624 {
5625     fprintf(stderr, "WARNING: object %s(%p) was inadvertently collected\n", (char *)name, (void *)obj);
5626     return Qnil;
5627 }
5628 
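     /*
      * Attaches a finalizer that warns on stderr if obj is ever collected;
      * a "sentinel" for objects that are expected to stay alive.
      */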
5629 void
5630 rb_gcdebug_sentinel(VALUE obj, const char *name)
5631 {
5632     rb_define_final(obj, rb_proc_new(gcdebug_sentinel, (VALUE)name));
5633 }
5634 #endif /* GC_DEBUG */
5635 
5636 
5637 /*
5638  * Document-module: ObjectSpace
5639  *
5640  *  The ObjectSpace module contains a number of routines
5641  *  that interact with the garbage collection facility and allow you to
5642  *  traverse all living objects with an iterator.
5643  *
5644  *  ObjectSpace also provides support for object finalizers, procs that will be
5645  *  called when a specific object is about to be destroyed by garbage
5646  *  collection.
5647  *
5648  *     a = "A"
5649  *     b = "B"
5650  *
5651  *     ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5652  *     ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5653  *
5654  *  _produces:_
5655  *
5656  *     Finalizer two on 537763470
5657  *     Finalizer one on 537763480
5658  */
5659 
5660 /*
5661  *  Document-class: ObjectSpace::WeakMap
5662  *
5663  *  An ObjectSpace::WeakMap object holds references to objects
5664  *  without preventing them from being garbage collected.
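      *
      *  A small sketch of the methods defined below (+[]=+ and +[]+):
      *
      *     map = ObjectSpace::WeakMap.new
      *     key = "foo"
      *     map[key] = Object.new
      *     map[key]    #=> the object, or nil once it has been collected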
5665  *
5666  *  This class is mostly used internally by WeakRef; use WeakRef
5667  *  (+lib/weakref.rb+) as the public interface.
5668  */
5669 
5670 /*  Document-class: GC::Profiler
5671  *
5672  *  The GC profiler provides access to information on GC runs, including
5673  *  when they ran, how long they took, and the size of the object space.
5674  *
5675  *  Example:
5676  *
5677  *    GC::Profiler.enable
5678  *
5679  *    require 'rdoc/rdoc'
5680  *
5681  *    GC::Profiler.report
5682  *
5683  *    GC::Profiler.disable
5684  *
5685  *  See also GC.count and, when Ruby is built with +CALC_EXACT_MALLOC_SIZE+, GC.malloc_allocated_size and GC.malloc_allocations
5686  */
5687 
5688 /*
5689  *  The GC module provides an interface to Ruby's mark and
5690  *  sweep garbage collection mechanism.
5691  *
5692  *  Some of the underlying methods are also available via the ObjectSpace
5693  *  module.
5694  *
5695  *  You may obtain information about the operation of the GC through
5696  *  GC::Profiler.
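      *
      *  For example (illustrative values):
      *
      *     GC.count          #=> 12
      *     GC.start          #=> nil
      *     GC.stat[:count]   #=> 13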
5697  */
5698 
5699 void
5700 Init_GC(void)
5701 {
5702     VALUE rb_mObjSpace;
5703     VALUE rb_mProfiler;
5704 
5705     rb_mGC = rb_define_module("GC");
5706     rb_define_singleton_method(rb_mGC, "start", rb_gc_start, 0);
5707     rb_define_singleton_method(rb_mGC, "enable", rb_gc_enable, 0);
5708     rb_define_singleton_method(rb_mGC, "disable", rb_gc_disable, 0);
5709     rb_define_singleton_method(rb_mGC, "stress", gc_stress_get, 0);
5710     rb_define_singleton_method(rb_mGC, "stress=", gc_stress_set, 1);
5711     rb_define_singleton_method(rb_mGC, "count", gc_count, 0);
5712     rb_define_singleton_method(rb_mGC, "stat", gc_stat, -1);
5713     rb_define_method(rb_mGC, "garbage_collect", rb_gc_start, 0);
5714 
5715     rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
5716     rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
5717     rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
5718     rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
5719     rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
5720     rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
5721     rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
5722     rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
5723     rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
5724 
5725     rb_mObjSpace = rb_define_module("ObjectSpace");
5726     rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5727     rb_define_module_function(rb_mObjSpace, "garbage_collect", rb_gc_start, 0);
5728 
5729     rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5730     rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5731 
5732     rb_define_module_function(rb_mObjSpace, "_id2ref", id2ref, 1);
5733 
5734     nomem_error = rb_exc_new3(rb_eNoMemError,
5735                               rb_obj_freeze(rb_str_new2("failed to allocate memory")));
5736     OBJ_TAINT(nomem_error);
5737     OBJ_FREEZE(nomem_error);
5738 
5739     rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5740     rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5741 
5742     rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5743 
5744     {
5745         VALUE rb_cWeakMap = rb_define_class_under(rb_mObjSpace, "WeakMap", rb_cObject);
5746         rb_define_alloc_func(rb_cWeakMap, wmap_allocate);
5747         rb_define_method(rb_cWeakMap, "[]=", wmap_aset, 2);
5748         rb_define_method(rb_cWeakMap, "[]", wmap_aref, 1);
5749         rb_define_private_method(rb_cWeakMap, "finalize", wmap_finalize, 1);
5750     }
5751 
5752 #if CALC_EXACT_MALLOC_SIZE
5753     rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
5754     rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
5755 #endif
5756 }