14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
17#include "ruby/internal/config.h"
24#define sighandler_t ruby_sighandler_t
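/*
 * On platforms without malloc_usable_size(), the closest native query is
 * mapped onto it below: _msize() on the Windows CRT and malloc_size() on
 * Darwin, with the matching malloc header included afterwards.
 */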
36#ifndef HAVE_MALLOC_USABLE_SIZE
38# define HAVE_MALLOC_USABLE_SIZE
39# define malloc_usable_size(a) _msize(a)
40# elif defined HAVE_MALLOC_SIZE
41# define HAVE_MALLOC_USABLE_SIZE
42# define malloc_usable_size(a) malloc_size(a)
46#ifdef HAVE_MALLOC_USABLE_SIZE
47# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
49# elif defined(HAVE_MALLOC_H)
51# elif defined(HAVE_MALLOC_NP_H)
52# include <malloc_np.h>
53# elif defined(HAVE_MALLOC_MALLOC_H)
54# include <malloc/malloc.h>
58#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
68#ifdef HAVE_SYS_RESOURCE_H
69# include <sys/resource.h>
72#if defined _WIN32 || defined __CYGWIN__
74#elif defined(HAVE_POSIX_MEMALIGN)
75#elif defined(HAVE_MEMALIGN)
82#include <emscripten.h>
88#include "debug_counter.h"
89#include "eval_intern.h"
93#include "internal/class.h"
94#include "internal/complex.h"
95#include "internal/cont.h"
96#include "internal/error.h"
97#include "internal/eval.h"
98#include "internal/gc.h"
99#include "internal/hash.h"
100#include "internal/imemo.h"
101#include "internal/io.h"
102#include "internal/numeric.h"
103#include "internal/object.h"
104#include "internal/proc.h"
105#include "internal/rational.h"
106#include "internal/sanitizers.h"
107#include "internal/struct.h"
108#include "internal/symbol.h"
109#include "internal/thread.h"
110#include "internal/variable.h"
111#include "internal/warnings.h"
121#include "ruby_assert.h"
122#include "ruby_atomic.h"
124#include "transient_heap.h"
127#include "vm_callinfo.h"
128#include "ractor_core.h"
132#define rb_setjmp(env) RUBY_SETJMP(env)
133#define rb_jmp_buf rb_jmpbuf_t
134#undef rb_data_object_wrap
136#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
137#define MAP_ANONYMOUS MAP_ANON
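/* MAP_ANON is the older BSD spelling; treat it as MAP_ANONYMOUS. */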
141size_add_overflow(size_t x, size_t y)
147#elif __has_builtin(__builtin_add_overflow)
148 p = __builtin_add_overflow(x, y, &z);
150#elif defined(DSIZE_T)
166size_mul_add_overflow(size_t x, size_t y, size_t z)
174size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w)
182PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
185size_mul_or_raise(size_t x, size_t y, VALUE exc)
188    if (LIKELY(!t.left)) {
197                 "integer overflow: %"PRIuSIZE
200                 x, y, (size_t)SIZE_MAX);
205rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
207    return size_mul_or_raise(x, y, exc);
211size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
214    if (LIKELY(!t.left)) {
223                 "integer overflow: %"PRIuSIZE
227                 x, y, z, (size_t)SIZE_MAX);
232rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
234    return size_mul_add_or_raise(x, y, z, exc);
238size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
241    if (LIKELY(!t.left)) {
250                 "integer overflow: %"PRIdSIZE
255                 x, y, z, w, (size_t)SIZE_MAX);
259#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
261volatile VALUE rb_gc_guarded_val;
263rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
265    rb_gc_guarded_val = val;
271#ifndef GC_HEAP_INIT_SLOTS
272#define GC_HEAP_INIT_SLOTS 10000
274#ifndef GC_HEAP_FREE_SLOTS
275#define GC_HEAP_FREE_SLOTS 4096
277#ifndef GC_HEAP_GROWTH_FACTOR
278#define GC_HEAP_GROWTH_FACTOR 1.8
280#ifndef GC_HEAP_GROWTH_MAX_SLOTS
281#define GC_HEAP_GROWTH_MAX_SLOTS 0
283#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
284#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
287#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
288#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
290#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
291#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
293#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
294#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
297#ifndef GC_MALLOC_LIMIT_MIN
298#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 )
300#ifndef GC_MALLOC_LIMIT_MAX
301#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 )
303#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
304#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
307#ifndef GC_OLDMALLOC_LIMIT_MIN
308#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 )
310#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
311#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
313#ifndef GC_OLDMALLOC_LIMIT_MAX
314#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 )
317#ifndef PRINT_MEASURE_LINE
318#define PRINT_MEASURE_LINE 0
320#ifndef PRINT_ENTER_EXIT_TICK
321#define PRINT_ENTER_EXIT_TICK 0
323#ifndef PRINT_ROOT_TICKS
324#define PRINT_ROOT_TICKS 0
327#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_MEASURE_LINE || PRINT_ROOT_TICKS)
331 size_t heap_init_slots;
332 size_t heap_free_slots;
333 double growth_factor;
334 size_t growth_max_slots;
336 double heap_free_slots_min_ratio;
337 double heap_free_slots_goal_ratio;
338 double heap_free_slots_max_ratio;
339 double oldobject_limit_factor;
341 size_t malloc_limit_min;
342 size_t malloc_limit_max;
343 double malloc_limit_growth_factor;
345 size_t oldmalloc_limit_min;
346 size_t oldmalloc_limit_max;
347 double oldmalloc_limit_growth_factor;
355 GC_HEAP_GROWTH_FACTOR,
356 GC_HEAP_GROWTH_MAX_SLOTS,
358 GC_HEAP_FREE_SLOTS_MIN_RATIO,
359 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
360 GC_HEAP_FREE_SLOTS_MAX_RATIO,
361 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
365 GC_MALLOC_LIMIT_GROWTH_FACTOR,
367 GC_OLDMALLOC_LIMIT_MIN,
368 GC_OLDMALLOC_LIMIT_MAX,
369 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
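/*
 * Defaults for the gc_params tuning knobs above.  Each one can be overridden
 * at process start through the matching RUBY_GC_* environment variable
 * (e.g. RUBY_GC_HEAP_GROWTH_FACTOR, RUBY_GC_MALLOC_LIMIT_MAX).
 */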
390#define RGENGC_DEBUG -1
392#define RGENGC_DEBUG 0
395#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
396# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
397#elif defined(HAVE_VA_ARGS_MACRO)
398# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
400# define RGENGC_DEBUG_ENABLED(level) 0
402int ruby_rgengc_debug;
412#ifndef RGENGC_CHECK_MODE
413#define RGENGC_CHECK_MODE 0
417#define GC_ASSERT(expr) RUBY_ASSERT_MESG_WHEN(RGENGC_CHECK_MODE > 0, expr, #expr)
425#ifndef RGENGC_OLD_NEWOBJ_CHECK
426#define RGENGC_OLD_NEWOBJ_CHECK 0
434#ifndef RGENGC_PROFILE
435#define RGENGC_PROFILE 0
444#ifndef RGENGC_ESTIMATE_OLDMALLOC
445#define RGENGC_ESTIMATE_OLDMALLOC 1
451#ifndef RGENGC_FORCE_MAJOR_GC
452#define RGENGC_FORCE_MAJOR_GC 0
455#ifndef GC_PROFILE_MORE_DETAIL
456#define GC_PROFILE_MORE_DETAIL 0
458#ifndef GC_PROFILE_DETAIL_MEMORY
459#define GC_PROFILE_DETAIL_MEMORY 0
461#ifndef GC_ENABLE_INCREMENTAL_MARK
462#define GC_ENABLE_INCREMENTAL_MARK USE_RINCGC
464#ifndef GC_ENABLE_LAZY_SWEEP
465#define GC_ENABLE_LAZY_SWEEP 1
467#ifndef CALC_EXACT_MALLOC_SIZE
468#define CALC_EXACT_MALLOC_SIZE USE_GC_MALLOC_OBJ_INFO_DETAILS
470#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
471#ifndef MALLOC_ALLOCATED_SIZE
472#define MALLOC_ALLOCATED_SIZE 0
475#define MALLOC_ALLOCATED_SIZE 0
477#ifndef MALLOC_ALLOCATED_SIZE_CHECK
478#define MALLOC_ALLOCATED_SIZE_CHECK 0
481#ifndef GC_DEBUG_STRESS_TO_CLASS
482#define GC_DEBUG_STRESS_TO_CLASS 0
485#ifndef RGENGC_OBJ_INFO
486#define RGENGC_OBJ_INFO (RGENGC_DEBUG | RGENGC_CHECK_MODE)
490 GPR_FLAG_NONE = 0x000,
492 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
493 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
494 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
495 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
496#if RGENGC_ESTIMATE_OLDMALLOC
497 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
499 GPR_FLAG_MAJOR_MASK = 0x0ff,
502 GPR_FLAG_NEWOBJ = 0x100,
503 GPR_FLAG_MALLOC = 0x200,
504 GPR_FLAG_METHOD = 0x400,
505 GPR_FLAG_CAPI = 0x800,
506 GPR_FLAG_STRESS = 0x1000,
509 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
510 GPR_FLAG_HAVE_FINALIZE = 0x4000,
511 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
512 GPR_FLAG_FULL_MARK = 0x10000,
513 GPR_FLAG_COMPACT = 0x20000,
516 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
517 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
518} gc_profile_record_flag;
524 double gc_invoke_time;
526 size_t heap_total_objects;
527 size_t heap_use_size;
528 size_t heap_total_size;
529 size_t moved_objects;
531#if GC_PROFILE_MORE_DETAIL
533 double gc_sweep_time;
535 size_t heap_use_pages;
536 size_t heap_live_objects;
537 size_t heap_free_objects;
539 size_t allocate_increase;
540 size_t allocate_limit;
543 size_t removing_objects;
544 size_t empty_objects;
545#if GC_PROFILE_DETAIL_MEMORY
551#if MALLOC_ALLOCATED_SIZE
552 size_t allocated_size;
555#if RGENGC_PROFILE > 0
557 size_t remembered_normal_objects;
558 size_t remembered_shady_objects;
562#define FL_FROM_FREELIST FL_USER0
570#define RMOVED(obj) ((struct RMoved *)(obj))
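/*
 * Mark/pin/remember bitmaps are stored as arrays of bits_t words, so a single
 * word covers BITS_BITLENGTH object slots (64 on a typical LP64 build) and
 * popcount_bits() counts how many slots in a word are set.
 */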
628typedef uintptr_t bits_t;
630    BITS_SIZE = sizeof(bits_t),
631 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
633#define popcount_bits rb_popcount_intptr
650#define STACK_CHUNK_SIZE 500
653 VALUE data[STACK_CHUNK_SIZE];
663 size_t unused_cache_size;
666#define SIZE_POOL_EDEN_HEAP(size_pool) (&(size_pool)->eden_heap)
667#define SIZE_POOL_TOMB_HEAP(size_pool) (&(size_pool)->tomb_heap)
674 RVALUE * compact_cursor_index;
675#if GC_ENABLE_INCREMENTAL_MARK
685 size_t allocatable_pages;
693 size_t force_major_gc_count;
710#if MALLOC_ALLOCATED_SIZE
711 size_t allocated_size;
717 unsigned int mode : 2;
718 unsigned int immediate_sweep : 1;
719 unsigned int dont_gc : 1;
720 unsigned int dont_incremental : 1;
721 unsigned int during_gc : 1;
722 unsigned int during_compacting : 1;
723 unsigned int gc_stressful: 1;
724 unsigned int has_hook: 1;
725 unsigned int during_minor_gc : 1;
726#if GC_ENABLE_INCREMENTAL_MARK
727 unsigned int during_incremental_marking : 1;
729 unsigned int measure_gc : 1;
732 rb_event_flag_t hook_events;
733 size_t total_allocated_objects;
734 VALUE next_object_id;
739 rb_atomic_t finalizing;
747 size_t allocated_pages;
748 size_t allocatable_pages;
749 size_t sorted_length;
751 size_t freeable_pages;
755 VALUE deferred_final;
762 unsigned int latest_gc_info;
768#if GC_PROFILE_MORE_DETAIL
773 size_t minor_gc_count;
774 size_t major_gc_count;
775 size_t compact_count;
776 size_t read_barrier_faults;
777#if RGENGC_PROFILE > 0
778 size_t total_generated_normal_object_count;
779 size_t total_generated_shady_object_count;
780 size_t total_shade_operation_count;
781 size_t total_promoted_count;
782 size_t total_remembered_normal_object_count;
783 size_t total_remembered_shady_object_count;
785#if RGENGC_PROFILE >= 2
786 size_t generated_normal_object_count_types[
RUBY_T_MASK];
787 size_t generated_shady_object_count_types[
RUBY_T_MASK];
790 size_t remembered_normal_object_count_types[
RUBY_T_MASK];
791 size_t remembered_shady_object_count_types[
RUBY_T_MASK];
796 double gc_sweep_start_time;
797 size_t total_allocated_objects_at_gc_start;
798 size_t heap_used_at_gc_start;
802 size_t total_freed_objects;
803 size_t total_allocated_pages;
804 size_t total_freed_pages;
805 uint64_t total_time_ns;
810 VALUE gc_stress_mode;
815 size_t last_major_gc;
816 size_t uncollectible_wb_unprotected_objects;
817 size_t uncollectible_wb_unprotected_objects_limit;
819 size_t old_objects_limit;
821#if RGENGC_ESTIMATE_OLDMALLOC
822 size_t oldmalloc_increase;
823 size_t oldmalloc_increase_limit;
826#if RGENGC_CHECK_MODE >= 2
833 size_t considered_count_table[
T_MASK];
834 size_t moved_count_table[
T_MASK];
838#if GC_ENABLE_INCREMENTAL_MARK
848#if GC_DEBUG_STRESS_TO_CLASS
849 VALUE stress_to_class;
855#define HEAP_PAGE_ALIGN_LOG 14
856#define CEILDIV(i, mod) (((i) + (mod) - 1)/(mod))
858 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
859 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
860 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
861    HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header))/sizeof(struct RVALUE)),
862    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, sizeof(struct RVALUE)), BITS_BITLENGTH),
863 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
865#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
866#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
869# if HAVE_CONST_PAGE_SIZE
871static const bool USE_MMAP_ALIGNED_ALLOC = (PAGE_SIZE <= HEAP_PAGE_SIZE);
872# elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
874static const bool USE_MMAP_ALIGNED_ALLOC = true;
877# define USE_MMAP_ALIGNED_ALLOC (use_mmap_aligned_alloc != false)
879static bool use_mmap_aligned_alloc;
881#elif !defined(__MINGW32__) && !defined(_WIN32)
882static const bool USE_MMAP_ALIGNED_ALLOC = false;
892 unsigned int before_sweep : 1;
893 unsigned int has_remembered_objects : 1;
894 unsigned int has_uncollectible_shady_objects : 1;
895 unsigned int in_tomb : 1;
905 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
907 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
908 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
909 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
912 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
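/*
 * Address arithmetic for the per-page bitmaps: pages are HEAP_PAGE_ALIGN
 * aligned, so masking the low bits of an object address yields its page body
 * and header.  NUM_IN_PAGE() is the slot index within the page, and
 * BITMAP_INDEX()/BITMAP_BIT() pick the word and bit for that slot.  Rough
 * example on a 64-bit build with 40-byte RVALUE slots: the object at page
 * offset 0x2000 is slot 204, i.e. bitmap word 3, bit 12.
 */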
915#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
916#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
917#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
919#define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK)/sizeof(RVALUE))
920#define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
921#define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
922#define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
925#define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
926#define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
927#define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
930#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
931#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
932#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
933#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
934#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
937#define rb_objspace (*rb_objspace_of(GET_VM()))
938#define rb_objspace_of(vm) ((vm)->objspace)
940#define ruby_initial_gc_stress gc_params.gc_stress
942VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
944#define malloc_limit objspace->malloc_params.limit
945#define malloc_increase objspace->malloc_params.increase
946#define malloc_allocated_size objspace->malloc_params.allocated_size
947#define heap_pages_sorted objspace->heap_pages.sorted
948#define heap_allocated_pages objspace->heap_pages.allocated_pages
949#define heap_pages_sorted_length objspace->heap_pages.sorted_length
950#define heap_pages_lomem objspace->heap_pages.range[0]
951#define heap_pages_himem objspace->heap_pages.range[1]
952#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
953#define heap_pages_final_slots objspace->heap_pages.final_slots
954#define heap_pages_deferred_final objspace->heap_pages.deferred_final
955#define size_pools objspace->size_pools
956#define during_gc objspace->flags.during_gc
957#define finalizing objspace->atomic_flags.finalizing
958#define finalizer_table objspace->finalizer_table
959#define global_list objspace->global_list
960#define ruby_gc_stressful objspace->flags.gc_stressful
961#define ruby_gc_stress_mode objspace->gc_stress_mode
962#if GC_DEBUG_STRESS_TO_CLASS
963#define stress_to_class objspace->stress_to_class
965#define stress_to_class 0
969#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
970#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
971#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (b))
972#define dont_gc_val() (objspace->flags.dont_gc)
974#define dont_gc_on() (objspace->flags.dont_gc = 1)
975#define dont_gc_off() (objspace->flags.dont_gc = 0)
976#define dont_gc_set(b) (((int)b), objspace->flags.dont_gc = (b))
977#define dont_gc_val() (objspace->flags.dont_gc)
980static inline enum gc_mode
981gc_mode_verify(enum gc_mode mode)
983#if RGENGC_CHECK_MODE > 0
986      case gc_mode_marking:
987      case gc_mode_sweeping:
990        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
999    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1000        if (SIZE_POOL_EDEN_HEAP(&size_pools[i])->sweeping_page) {
1011    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1012        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_pages;
1021    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1022        count += SIZE_POOL_EDEN_HEAP(&size_pools[i])->total_slots;
1031    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1032        count += SIZE_POOL_TOMB_HEAP(&size_pools[i])->total_pages;
1041    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1042        count += size_pools[i].allocatable_pages;
1051    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1053        int slot_size_multiple = size_pool->slot_size / sizeof(RVALUE);
1054        count += size_pool->allocatable_pages * HEAP_PAGE_OBJ_LIMIT / slot_size_multiple;
1059#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
1060#define gc_mode_set(objspace, mode) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(mode))
1062#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
1063#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
1064#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
1065#if GC_ENABLE_INCREMENTAL_MARK
1066#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
1068#define is_incremental_marking(objspace) FALSE
1070#if GC_ENABLE_INCREMENTAL_MARK
1071#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
1073#define will_be_incremental_marking(objspace) FALSE
1075#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
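/*
 * Object IDs for heap objects: when a pointer fits in a long, the ID is the
 * address itself tagged with FIXNUM_FLAG; otherwise the address is halved so
 * it still fits in a Fixnum.  obj_id_to_ref() reverses the mapping.
 */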
1077#if SIZEOF_LONG == SIZEOF_VOIDP
1078# define nonspecial_obj_id(obj) (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG)
1079# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
1080#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1081# define nonspecial_obj_id(obj) LL2NUM((SIGNED_VALUE)(obj) / 2)
1082# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
1083 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
1085# error not supported
1088#define RANY(o) ((RVALUE*)(o))
1093    void (*dfree)(void *);
1097#define RZOMBIE(o) ((struct RZombie *)(o))
1099#define nomem_error GET_VM()->special_exceptions[ruby_error_nomemory]
1101#if RUBY_MARK_FREE_DEBUG
1102int ruby_gc_debug_indent = 0;
1105int ruby_disable_gc = 0;
1106int ruby_enable_autocompact = 0;
1108void rb_iseq_mark(const rb_iseq_t *iseq);
1109void rb_iseq_update_references(rb_iseq_t *iseq);
1110void rb_iseq_free(const rb_iseq_t *iseq);
1111size_t rb_iseq_memsize(const rb_iseq_t *iseq);
1112void rb_vm_update_references(void *ptr);
1114void rb_gcdebug_print_obj_condition(VALUE obj);
1116static VALUE define_final0(VALUE obj, VALUE block);
1118NORETURN(static void *gc_vraise(void *ptr));
1119NORETURN(static void gc_raise(VALUE exc, const char *fmt, ...));
1120NORETURN(static void negative_size_allocation_error(const char *));
1126static int garbage_collect(rb_objspace_t *, unsigned int reason);
1128static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1131enum gc_enter_event {
1132 gc_enter_event_start,
1133 gc_enter_event_mark_continue,
1134 gc_enter_event_sweep_continue,
1135 gc_enter_event_rest,
1136 gc_enter_event_finalizer,
1137 gc_enter_event_rb_memerror,
1140static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1141static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1143static void gc_marks(rb_objspace_t *objspace, int full_mark);
1144static void gc_marks_start(rb_objspace_t *objspace, int full);
1156static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1157static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1158static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1160NO_SANITIZE("memory", static void gc_mark_maybe(rb_objspace_t *objspace, VALUE ptr));
1161static void gc_mark_children(rb_objspace_t *objspace, VALUE ptr);
1163static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1167static inline int gc_mark_set(rb_objspace_t *objspace, VALUE obj);
1168NO_SANITIZE("memory", static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr));
1173static void shrink_stack_chunk_cache(mark_stack_t *stack);
1175static size_t obj_memsize_of(VALUE obj, int use_all_types);
1176static void gc_verify_internal_consistency(rb_objspace_t *objspace);
1180static void gc_stress_set(rb_objspace_t *objspace, VALUE flag);
1183static double getrusage_time(void);
1184static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1187static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1189static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1190static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1194#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
1195 if (gc_object_moved_p(_objspace, (VALUE)_thing)) { \
1196 *((_type *)(&_thing)) = (_type)RMOVED((_thing))->destination; \
1200#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
1202#define gc_prof_record(objspace) (objspace)->profile.current_record
1203#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1205#ifdef HAVE_VA_ARGS_MACRO
1206# define gc_report(level, objspace, ...) \
1207 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1209# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1211PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1212static const char *obj_info(VALUE obj);
1213static const char *obj_type_name(VALUE obj);
1233#if defined(__GNUC__) && defined(__i386__)
1234typedef unsigned long long tick_t;
1235#define PRItick "llu"
1239 unsigned long long int x;
1240    __asm__ __volatile__ ("rdtsc" : "=A" (x));
1244#elif defined(__GNUC__) && defined(__x86_64__)
1245typedef unsigned long long tick_t;
1246#define PRItick "llu"
1248static __inline__ tick_t
1251 unsigned long hi, lo;
1252    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1253    return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1256#elif defined(__powerpc64__) && GCC_VERSION_SINCE(4,8,0)
1257typedef unsigned long long tick_t;
1258#define PRItick "llu"
1260static __inline__ tick_t
1263 unsigned long long val = __builtin_ppc_get_timebase();
1267#elif defined(__aarch64__) && defined(__GNUC__)
1268typedef unsigned long tick_t;
1271static __inline__ tick_t
1275    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1280#elif defined(_WIN32) && defined(_MSC_VER)
1282typedef unsigned __int64 tick_t;
1283#define PRItick "llu"
1292typedef clock_t tick_t;
1293#define PRItick "llu"
1303typedef double tick_t;
1304#define PRItick "4.9f"
1309 return getrusage_time();
1312#error "choose tick type"
1315#define MEASURE_LINE(expr) do { \
1316 volatile tick_t start_time = tick(); \
1317 volatile tick_t end_time; \
1319 end_time = tick(); \
1320 fprintf(stderr, "0\t%"PRItick"\t%s\n", end_time - start_time, #expr); \
1324#define MEASURE_LINE(expr) expr
1328asan_unpoison_object_temporary(VALUE obj)
1330 void *ptr = asan_poisoned_object_p(obj);
1331    asan_unpoison_object(obj, false);
1335#define FL_CHECK2(name, x, pred) \
1336 ((RGENGC_CHECK_MODE && SPECIAL_CONST_P(x)) ? \
1337 (rb_bug(name": SPECIAL_CONST (%p)", (void *)(x)), 0) : (pred))
1338#define FL_TEST2(x,f) FL_CHECK2("FL_TEST2", x, FL_TEST_RAW((x),(f)) != 0)
1339#define FL_SET2(x,f) FL_CHECK2("FL_SET2", x, RBASIC(x)->flags |= (f))
1340#define FL_UNSET2(x,f) FL_CHECK2("FL_UNSET2", x, RBASIC(x)->flags &= ~(f))
1342#define RVALUE_MARK_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1343#define RVALUE_PIN_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1344#define RVALUE_PAGE_MARKED(page, obj) MARKED_IN_BITMAP((page)->mark_bits, (obj))
1346#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1347#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1348#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1350#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1351#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1352#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1354#define RVALUE_OLD_AGE 3
1355#define RVALUE_AGE_SHIFT 5
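/*
 * Generational ageing: an object's age is kept in two flag bits starting at
 * RVALUE_AGE_SHIFT.  Once it survives enough minor GCs to reach
 * RVALUE_OLD_AGE (3), it is promoted to the old generation and is only
 * reclaimed by a major GC.
 */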
1357static int rgengc_remembered(rb_objspace_t *objspace, VALUE obj);
1358static int rgengc_remembered_sweep(rb_objspace_t *objspace, VALUE obj);
1359static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1364RVALUE_FLAGS_AGE(VALUE flags)
1370check_rvalue_consistency_force(const VALUE obj, int terminate)
1375 RB_VM_LOCK_ENTER_NO_BARRIER();
1378        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1381    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1384        for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1386            list_for_each(&size_pool->tomb_heap.pages, page, page_node) {
1387                if (&page->start[0] <= (RVALUE *)obj &&
1388                    (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * size_pool->slot_size))) {
1389                    fprintf(stderr, "check_rvalue_consistency: %p is in a tomb_heap (%p).\n",
1390                            (void *)obj, (void *)page);
1397        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1403        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1404        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1405        const int mark_bit = RVALUE_MARK_BITMAP(obj) != 0;
1406        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0, remembered_bit = marking_bit;
1407        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1409        if (GET_HEAP_PAGE(obj)->flags.in_tomb) {
1410            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", obj_info(obj));
1414            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", obj_info(obj));
1418            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", obj_info(obj));
1422        obj_memsize_of((VALUE)obj, FALSE);
1428        if (age > 0 && wb_unprotected_bit) {
1429            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", obj_info(obj), age);
1433        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1434            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", obj_info(obj));
1438        if (!is_full_marking(objspace)) {
1439            if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1440                fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1441                        obj_info(obj), age);
1444            if (remembered_bit && age != RVALUE_OLD_AGE) {
1445                fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1446                        obj_info(obj), age);
1458        if (is_incremental_marking(objspace) && marking_bit) {
1459            if (!is_marking(objspace) && !mark_bit) {
1460                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", obj_info(obj));
1466    RB_VM_LOCK_LEAVE_NO_BARRIER();
1468    if (err > 0 && terminate) {
1469        rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
1474#if RGENGC_CHECK_MODE == 0
1476check_rvalue_consistency(const VALUE obj)
1482check_rvalue_consistency(const VALUE obj)
1484 check_rvalue_consistency_force(obj, TRUE);
1496 void *poisoned = asan_poisoned_object_p(obj);
1497    asan_unpoison_object(obj, false);
1503 asan_poison_object(obj);
1510RVALUE_MARKED(VALUE obj)
1512 check_rvalue_consistency(obj);
1513 return RVALUE_MARK_BITMAP(obj) != 0;
1517RVALUE_PINNED(VALUE obj)
1519 check_rvalue_consistency(obj);
1520 return RVALUE_PIN_BITMAP(obj) != 0;
1524RVALUE_WB_UNPROTECTED(VALUE obj)
1526 check_rvalue_consistency(obj);
1527 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1531RVALUE_MARKING(VALUE obj)
1533 check_rvalue_consistency(obj);
1534 return RVALUE_MARKING_BITMAP(obj) != 0;
1538RVALUE_REMEMBERED(VALUE obj)
1540 check_rvalue_consistency(obj);
1541 return RVALUE_MARKING_BITMAP(obj) != 0;
1545RVALUE_UNCOLLECTIBLE(VALUE obj)
1547 check_rvalue_consistency(obj);
1548 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1552RVALUE_OLD_P_RAW(VALUE obj)
1555    return (RBASIC(obj)->flags & promoted) == promoted;
1559RVALUE_OLD_P(VALUE obj)
1561 check_rvalue_consistency(obj);
1562 return RVALUE_OLD_P_RAW(obj);
1565#if RGENGC_CHECK_MODE || GC_DEBUG
1567RVALUE_AGE(VALUE obj)
1569 check_rvalue_consistency(obj);
1570    return RVALUE_FLAGS_AGE(RBASIC(obj)->flags);
1577 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1578 objspace->rgengc.old_objects++;
1579 rb_transient_heap_promote(obj);
1581#if RGENGC_PROFILE >= 2
1582 objspace->profile.total_promoted_count++;
1588RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1590 RB_DEBUG_COUNTER_INC(obj_promote);
1591 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1595RVALUE_FLAGS_AGE_SET(VALUE flags, int age)
1598 flags |= (age << RVALUE_AGE_SHIFT);
1606    VALUE flags = RBASIC(obj)->flags;
1607    int age = RVALUE_FLAGS_AGE(flags);
1609    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1610        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", obj_info(obj));
1614 RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(flags, age);
1616 if (age == RVALUE_OLD_AGE) {
1617 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1619 check_rvalue_consistency(obj);
1626 check_rvalue_consistency(obj);
1627 GC_ASSERT(!RVALUE_OLD_P(obj));
1629    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE);
1630 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1632 check_rvalue_consistency(obj);
1639 check_rvalue_consistency(obj);
1640 GC_ASSERT(!RVALUE_OLD_P(obj));
1642    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, RVALUE_OLD_AGE - 1);
1644 check_rvalue_consistency(obj);
1650    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1651 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1657 check_rvalue_consistency(obj);
1658 GC_ASSERT(RVALUE_OLD_P(obj));
1660 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(obj)) {
1661 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
1664 RVALUE_DEMOTE_RAW(objspace, obj);
1666 if (RVALUE_MARKED(obj)) {
1667 objspace->rgengc.old_objects--;
1670 check_rvalue_consistency(obj);
1674RVALUE_AGE_RESET_RAW(VALUE obj)
1676    RBASIC(obj)->flags = RVALUE_FLAGS_AGE_SET(RBASIC(obj)->flags, 0);
1680RVALUE_AGE_RESET(VALUE obj)
1682 check_rvalue_consistency(obj);
1683 GC_ASSERT(!RVALUE_OLD_P(obj));
1685 RVALUE_AGE_RESET_RAW(obj);
1686 check_rvalue_consistency(obj);
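/*
 * Tri-color abstraction used by incremental marking: "white" objects are
 * unmarked, "grey" objects are marked but still carry the marking bit (their
 * children are not yet traced), and "black" objects are marked with tracing
 * finished.
 */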
1690RVALUE_BLACK_P(VALUE obj)
1692 return RVALUE_MARKED(obj) && !RVALUE_MARKING(obj);
1697RVALUE_GREY_P(VALUE obj)
1699 return RVALUE_MARKED(obj) && RVALUE_MARKING(obj);
1704RVALUE_WHITE_P(VALUE obj)
1706 return RVALUE_MARKED(obj) == FALSE;
1716 return calloc(1, n);
1720rb_objspace_alloc(void)
1723 objspace->flags.measure_gc = 1;
1724 malloc_limit = gc_params.malloc_limit_min;
1726    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1729        size_pool->slot_size = sizeof(RVALUE) * (1 << i);
1731 list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
1732 list_head_init(&SIZE_POOL_TOMB_HEAP(size_pool)->pages);
1747 if (is_lazy_sweeping(objspace))
1748        rb_bug("lazy sweeping underway when freeing object space");
1750 if (objspace->profile.records) {
1751 free(objspace->profile.records);
1752 objspace->profile.records = 0;
1757 for (list = global_list; list; list = next) {
1762 if (heap_pages_sorted) {
1764 for (i = 0; i < heap_allocated_pages; ++i) {
1765 heap_page_free(objspace, heap_pages_sorted[i]);
1767 free(heap_pages_sorted);
1768 heap_allocated_pages = 0;
1769 heap_pages_sorted_length = 0;
1770 heap_pages_lomem = 0;
1771 heap_pages_himem = 0;
1773    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1775 SIZE_POOL_EDEN_HEAP(size_pool)->total_pages = 0;
1776 SIZE_POOL_EDEN_HEAP(size_pool)->total_slots = 0;
1779 st_free_table(objspace->id_to_obj_tbl);
1780 st_free_table(objspace->obj_to_id_tbl);
1782 free_stack_chunks(&objspace->mark_stack);
1783 mark_stack_free_cache(&objspace->mark_stack);
1789heap_pages_expand_sorted_to(rb_objspace_t *objspace, size_t next_length)
1792    size_t size = size_mul_or_raise(next_length, sizeof(struct heap_page *), rb_eRuntimeError);
1794    gc_report(3, objspace, "heap_pages_expand_sorted: next_length: %"PRIdSIZE", size: %"PRIdSIZE"\n",
1797    if (heap_pages_sorted_length > 0) {
1798        sorted = (struct heap_page **)realloc(heap_pages_sorted, size);
1799        if (sorted) heap_pages_sorted = sorted;
1802        sorted = heap_pages_sorted = (struct heap_page **)malloc(size);
1809 heap_pages_sorted_length = next_length;
1820 size_t next_length = heap_allocatable_pages(objspace);
1821    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
1823 next_length += SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
1824 next_length += SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
1827 if (next_length > heap_pages_sorted_length) {
1828 heap_pages_expand_sorted_to(objspace, next_length);
1831 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
1832 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
1838 size_pool->allocatable_pages = s;
1839 heap_pages_expand_sorted(objspace);
1845 ASSERT_vm_locking();
1849    asan_unpoison_object(obj, false);
1851    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1853    p->as.free.flags = 0;
1854    p->as.free.next = page->freelist;
1856    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1858    if (RGENGC_CHECK_MODE &&
1860        !(&page->start[0] <= (RVALUE *)obj &&
1861          (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1862          obj % sizeof(RVALUE) == 0)) {
1863        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)p);
1866    asan_poison_object(obj);
1867    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1873    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1874 GC_ASSERT(page->free_slots != 0);
1875 GC_ASSERT(page->freelist != NULL);
1877 page->free_next = heap->free_pages;
1878 heap->free_pages = page;
1880    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1882    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1885#if GC_ENABLE_INCREMENTAL_MARK
1889    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
1890 GC_ASSERT(page->free_slots != 0);
1891 GC_ASSERT(page->freelist != NULL);
1893 page->free_next = heap->pooled_pages;
1894 heap->pooled_pages = page;
1895 objspace->rincgc.pooled_slots += page->free_slots;
1897    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
1904 list_del(&page->page_node);
1905 heap->total_pages--;
1906 heap->total_slots -= page->total_slots;
1909static void rb_aligned_free(void *ptr, size_t size);
1914 heap_allocated_pages--;
1915 objspace->profile.total_freed_pages++;
1916 rb_aligned_free(GET_PAGE_BODY(page->start), HEAP_PAGE_SIZE);
1925 bool has_pages_in_tomb_heap = FALSE;
1926 for (i = 0; i < SIZE_POOL_COUNT; i++) {
1927 if (!list_empty(&SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages)) {
1928 has_pages_in_tomb_heap = TRUE;
1933 if (has_pages_in_tomb_heap) {
1934 for (i = j = 1; j < heap_allocated_pages; i++) {
1935 struct heap_page *page = heap_pages_sorted[i];
1937 if (page->flags.in_tomb && page->free_slots == page->total_slots) {
1938 heap_unlink_page(objspace, SIZE_POOL_TOMB_HEAP(page->size_pool), page);
1939 heap_page_free(objspace, page);
1943 heap_pages_sorted[j] = page;
1949 struct heap_page *hipage = heap_pages_sorted[heap_allocated_pages - 1];
1950 uintptr_t himem = (uintptr_t)hipage->start + (hipage->total_slots * hipage->slot_size);
1951 GC_ASSERT(himem <= (uintptr_t)heap_pages_himem);
1952    heap_pages_himem = (RVALUE *)himem;
1954 GC_ASSERT(j == heap_allocated_pages);
1961 uintptr_t start, end, p;
1964 uintptr_t hi, lo, mid;
1965 size_t stride = size_pool->slot_size;
1966    unsigned int limit = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)))/(int)stride;
1969    page_body = (struct heap_page_body *)rb_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1970    if (page_body == 0) {
1975    page = calloc1(sizeof(struct heap_page));
1977 rb_aligned_free(page_body, HEAP_PAGE_SIZE);
1984    if ((VALUE)start % sizeof(RVALUE) != 0) {
1985        int delta = (int)sizeof(RVALUE) - (start % (int)sizeof(RVALUE));
1986        start = start + delta;
1987        GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1993    if (NUM_IN_PAGE(start) == 1) {
1994        start += stride - sizeof(RVALUE);
1997    GC_ASSERT(NUM_IN_PAGE(start) * sizeof(RVALUE) % stride == 0);
1999 limit = (HEAP_PAGE_SIZE - (int)(start - (uintptr_t)page_body))/(int)stride;
2001 end = start + (limit * (int)stride);
2005 hi = (uintptr_t)heap_allocated_pages;
2009 mid = (lo + hi) / 2;
2010 mid_page = heap_pages_sorted[mid];
2011 if ((uintptr_t)mid_page->start < start) {
2014 else if ((uintptr_t)mid_page->start > start) {
2018        rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
2022    if (hi < (uintptr_t)heap_allocated_pages) {
2023        MEMMOVE(&heap_pages_sorted[hi+1], &heap_pages_sorted[hi], struct heap_page_header*, heap_allocated_pages - hi);
2026 heap_pages_sorted[hi] = page;
2028 heap_allocated_pages++;
2030 GC_ASSERT(heap_eden_total_pages(objspace) + heap_allocatable_pages(objspace) <= heap_pages_sorted_length);
2031 GC_ASSERT(heap_eden_total_pages(objspace) + heap_tomb_total_pages(objspace) == heap_allocated_pages - 1);
2032 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2034 objspace->profile.total_allocated_pages++;
2036 if (heap_allocated_pages > heap_pages_sorted_length) {
2037        rb_bug("heap_page_allocate: allocated(%"PRIdSIZE") > sorted(%"PRIdSIZE")",
2038 heap_allocated_pages, heap_pages_sorted_length);
2041    if (heap_pages_lomem == 0 || (uintptr_t)heap_pages_lomem > start) heap_pages_lomem = (RVALUE *)start;
2042    if ((uintptr_t)heap_pages_himem < end) heap_pages_himem = (RVALUE *)end;
2044    page->start = (RVALUE *)start;
2045 page->total_slots = limit;
2046 page->slot_size = size_pool->slot_size;
2047 page->size_pool = size_pool;
2048 page_body->header.page = page;
2050 for (p = start; p != end; p += stride) {
2051        gc_report(3, objspace, "assign_heap_page: %p is added to freelist\n", (void *)p);
2052 heap_page_add_freeobj(objspace, page, (VALUE)p);
2054 page->free_slots = limit;
2056    asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2065 list_for_each_safe(&SIZE_POOL_TOMB_HEAP(size_pool)->pages, page, next, page_node) {
2066        asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2067        if (page->freelist != NULL) {
2068            heap_unlink_page(objspace, &size_pool->tomb_heap, page);
2069            asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
2081    const char *method = "recycle";
2083 size_pool->allocatable_pages--;
2085 page = heap_page_resurrect(objspace, size_pool);
2088 page = heap_page_allocate(objspace, size_pool);
2089        method = "allocate";
2091    if (0) fprintf(stderr, "heap_page_create: %s - %p, "
2092            "heap_pages_sorted_length: %"PRIdSIZE", "
2093            "heap_allocated_pages: %"PRIdSIZE", "
2094            "tomb->total_pages: %"PRIdSIZE"\n",
2095            method, (void *)page, heap_pages_sorted_length, heap_allocated_pages, SIZE_POOL_TOMB_HEAP(size_pool)->total_pages);
2103 GC_ASSERT(!(heap == SIZE_POOL_EDEN_HEAP(size_pool) && heap->sweeping_page));
2104 page->flags.in_tomb = (heap == SIZE_POOL_TOMB_HEAP(size_pool));
2105 list_add_tail(&heap->pages, &page->page_node);
2106 heap->total_pages++;
2107 heap->total_slots += page->total_slots;
2113 struct heap_page *page = heap_page_create(objspace, size_pool);
2114 heap_add_page(objspace, size_pool, heap, page);
2115 heap_add_freepage(heap, page);
2123 size_pool_allocatable_pages_set(objspace, size_pool, add);
2125 for (i = 0; i < add; i++) {
2126 heap_assign_page(objspace, size_pool, heap);
2129 GC_ASSERT(size_pool->allocatable_pages == 0);
2133heap_extend_pages(rb_objspace_t *objspace, size_t free_slots, size_t total_slots, size_t used)
2135 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
2138 if (goal_ratio == 0.0) {
2139 next_used = (size_t)(used * gc_params.growth_factor);
2145 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
2147 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
2148 if (f < 1.0) f = 1.1;
2150 next_used = (size_t)(f * used);
2154 "free_slots(%8"PRIuSIZE
")/total_slots(%8"PRIuSIZE
")=%1.2f,"
2155 " G(%1.2f), f(%1.2f),"
2156 " used(%8"PRIuSIZE
") => next_used(%8"PRIuSIZE
")\n",
2157 free_slots, total_slots, free_slots/(
double)total_slots,
2158 goal_ratio, f, used, next_used);
2162 if (gc_params.growth_max_slots > 0) {
2163 size_t max_used = (size_t)(used + gc_params.growth_max_slots/HEAP_PAGE_OBJ_LIMIT);
2164 if (next_used > max_used) next_used = max_used;
2167 size_t extend_page_count = next_used - used;
2169 if (extend_page_count == 0) extend_page_count = 1;
2171 return extend_page_count;
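/*
 * Sizing rule above, roughly: with free-slots goal ratio G (0.40 by default)
 * the heap is grown by f = live_slots / ((1 - G) * total_slots), capped at
 * growth_factor and bumped to 1.1 whenever it falls below 1.0.  For example,
 * 8000 live slots out of 10000 with G = 0.40 gives f = 8000 / 6000 = 1.33,
 * i.e. about 1.33x the pages currently in use.
 */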
2177 if (size_pool->allocatable_pages > 0) {
2178        gc_report(1, objspace, "heap_increment: heap_pages_sorted_length: %"PRIdSIZE", "
2179                  "heap_pages_inc: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2180 heap_pages_sorted_length, size_pool->allocatable_pages, heap->total_pages);
2182 GC_ASSERT(heap_allocatable_pages(objspace) + heap_eden_total_pages(objspace) <= heap_pages_sorted_length);
2183 GC_ASSERT(heap_allocated_pages <= heap_pages_sorted_length);
2185 heap_assign_page(objspace, size_pool, heap);
2194 GC_ASSERT(heap->free_pages == NULL);
2196 if (is_lazy_sweeping(objspace)) {
2197 gc_sweep_continue(objspace, size_pool, heap);
2199 else if (is_incremental_marking(objspace)) {
2200 gc_marks_continue(objspace, size_pool, heap);
2203 if (heap->free_pages == NULL &&
2204 (will_be_incremental_marking(objspace) || heap_increment(objspace, size_pool, heap) == FALSE) &&
2205 gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2211rb_objspace_set_event_hook(const rb_event_flag_t event)
2215 objspace->flags.has_hook = (objspace->hook_events != 0);
2221 const VALUE *pc = ec->cfp->pc;
2222 if (pc && VM_FRAME_RUBYFRAME_P(ec->cfp)) {
2226 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, data);
2230#define gc_event_hook_available_p(objspace) ((objspace)->flags.has_hook)
2231#define gc_event_hook_needed_p(objspace, event) ((objspace)->hook_events & (event))
2233#define gc_event_hook_prep(objspace, event, data, prep) do { \
2234 if (UNLIKELY(gc_event_hook_needed_p(objspace, event))) { \
2236 gc_event_hook_body(GET_EC(), (objspace), (event), (data)); \
2240#define gc_event_hook(objspace, event, data) gc_event_hook_prep(objspace, event, data, (void)0)
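/*
 * gc_event_hook() only evaluates its payload when a hook for the given
 * internal event (e.g. RUBY_INTERNAL_EVENT_NEWOBJ / RUBY_INTERNAL_EVENT_FREEOBJ)
 * is registered, so the allocation and free fast paths stay cheap.
 */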
2243newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2245#if !__has_feature(memory_sanitizer)
2250    p->as.basic.flags = flags;
2251    *((VALUE *)&p->as.basic.klass) = klass;
2253#if RACTOR_CHECK_MODE
2254 rb_ractor_setup_belonging(obj);
2257#if RGENGC_CHECK_MODE
2258 p->as.values.v1 = p->as.values.v2 = p->as.values.v3 = 0;
2260 RB_VM_LOCK_ENTER_NO_BARRIER();
2262 check_rvalue_consistency(obj);
2264 GC_ASSERT(RVALUE_MARKED(obj) == FALSE);
2265 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
2266 GC_ASSERT(RVALUE_OLD_P(obj) == FALSE);
2267 GC_ASSERT(RVALUE_WB_UNPROTECTED(obj) == FALSE);
2270        if (RVALUE_AGE(obj) != 2) rb_bug("newobj: %s of age (%d) != 2.", obj_info(obj), RVALUE_AGE(obj));
2273        if (RVALUE_AGE(obj) > 0) rb_bug("newobj: %s of age (%d) > 0.", obj_info(obj), RVALUE_AGE(obj));
2275    if (rgengc_remembered(objspace, (VALUE)obj)) rb_bug("newobj: %s is remembered.", obj_info(obj));
2277 RB_VM_LOCK_LEAVE_NO_BARRIER();
2280 if (UNLIKELY(wb_protected == FALSE)) {
2281 ASSERT_vm_locking();
2282 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2286 objspace->total_allocated_objects++;
2290 objspace->profile.total_generated_normal_object_count++;
2291#if RGENGC_PROFILE >= 2
2292    objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2296        objspace->profile.total_generated_shady_object_count++;
2297#if RGENGC_PROFILE >= 2
2298        objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2304 RANY(obj)->file = rb_source_location_cstr(&RANY(obj)->line);
2308    gc_report(5, objspace, "newobj: %s\n", obj_info(obj));
2310#if RGENGC_OLD_NEWOBJ_CHECK > 0
2312 static int newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2314 if (!is_incremental_marking(objspace) &&
2317 if (--newobj_cnt == 0) {
2318 newobj_cnt = RGENGC_OLD_NEWOBJ_CHECK;
2320 gc_mark_set(objspace, obj);
2321 RVALUE_AGE_SET_OLD(objspace, obj);
2323 rb_gc_writebarrier_remember(obj);
2334static inline void ractor_set_cache(rb_ractor_t *cr, struct heap_page *page, size_t size_pool_idx);
2337rb_gc_obj_slot_size(VALUE obj)
2339 return GET_HEAP_PAGE(obj)->slot_size;
2343size_pool_slot_size(unsigned char pool_id)
2345    GC_ASSERT(pool_id < SIZE_POOL_COUNT);
2347    size_t slot_size = (1 << pool_id) * sizeof(RVALUE);
2349#if RGENGC_CHECK_MODE
2351    GC_ASSERT(size_pools[pool_id].slot_size == (short)slot_size);
2358rb_gc_size_allocatable_p(size_t size)
2360    return size <= size_pool_slot_size(SIZE_POOL_COUNT - 1);
2367 RVALUE *p = cache->freelist;
2370        VALUE obj = (VALUE)p;
2371        cache->freelist = p->as.free.next;
2372        asan_unpoison_object(obj, true);
2373#if RGENGC_CHECK_MODE
2375        MEMZERO((char *)obj, char, size_pool_slot_size(size_pool_idx));
2387 ASSERT_vm_locking();
2391 while (heap->free_pages == NULL) {
2392 heap_prepare(objspace, size_pool, heap);
2394 page = heap->free_pages;
2395 heap->free_pages = page->free_next;
2397 GC_ASSERT(page->free_slots != 0);
2398    RUBY_DEBUG_LOG("page:%p freelist:%p cnt:%d", (void *)page, (void *)page->freelist, page->free_slots);
2400    asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
2408    gc_report(3, &rb_objspace, "ractor_set_cache: Using page %p\n", (void *)GET_PAGE_BODY(page->start));
2412 cache->using_page = page;
2413 cache->freelist = page->freelist;
2414 page->free_slots = 0;
2415 page->freelist = NULL;
2417        asan_unpoison_object((VALUE)cache->freelist, false);
2419 asan_poison_object((VALUE)cache->freelist);
2425 ASSERT_vm_locking();
2428 struct heap_page *page = heap_next_freepage(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
2430 ractor_set_cache(cr, page, size_pool_idx);
2434newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2437 p->as.values.v1 = v1;
2438 p->as.values.v2 = v2;
2439 p->as.values.v3 = v3;
2444size_pool_idx_for_size(size_t size)
2447    size_t slot_count = CEILDIV(size, sizeof(RVALUE));
2450    size_t size_pool_idx = 64 - nlz_int64(slot_count - 1);
2451    if (size_pool_idx >= SIZE_POOL_COUNT) {
2452        rb_bug("size_pool_idx_for_size: allocation size too large");
2455    return size_pool_idx;
2457    GC_ASSERT(size <= sizeof(RVALUE));
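/*
 * Illustration (assuming 40-byte RVALUE slots on a 64-bit build with
 * variable-width allocation): requests up to 40 bytes map to pool 0, 41-80
 * bytes to pool 1 (80-byte slots), 81-160 bytes to pool 2, and so on,
 * doubling the slot size with each pool index.
 */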
2462ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx));
2465newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, int wb_protected, size_t size_pool_idx)
2470 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
2472 if (UNLIKELY(during_gc || ruby_gc_stressful)) {
2476            rb_bug("object allocation during garbage collection phase");
2479 if (ruby_gc_stressful) {
2480 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2487        while ((obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) == Qfalse) {
2488 ractor_cache_slots(objspace, cr, size_pool_idx);
2490 GC_ASSERT(obj != 0);
2491 newobj_init(klass, flags, wb_protected, objspace, obj);
2495 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
2500NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2502NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2506newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2508    return newobj_slowpath(klass, flags, objspace, cr, TRUE, size_pool_idx);
2512newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_t *cr, size_t size_pool_idx)
2514    return newobj_slowpath(klass, flags, objspace, cr, FALSE, size_pool_idx);
2518newobj_of0(VALUE klass, VALUE flags, int wb_protected, rb_ractor_t *cr, size_t alloc_size)
2523 RB_DEBUG_COUNTER_INC(obj_newobj);
2524 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2526#if GC_DEBUG_STRESS_TO_CLASS
2527 if (UNLIKELY(stress_to_class)) {
2529 for (i = 0; i < cnt; ++i) {
2535 size_t size_pool_idx = size_pool_idx_for_size(alloc_size);
2537 if ((!UNLIKELY(during_gc ||
2538 ruby_gc_stressful ||
2539 gc_event_hook_available_p(objspace)) &&
2541          (obj = ractor_cached_free_region(objspace, cr, size_pool_idx)) != Qfalse)) {
2543 newobj_init(klass, flags, wb_protected, objspace, obj);
2546 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2548 obj = wb_protected ?
2549 newobj_slowpath_wb_protected(klass, flags, objspace, cr, size_pool_idx) :
2550 newobj_slowpath_wb_unprotected(klass, flags, objspace, cr, size_pool_idx);
2557newobj_of(VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2559    VALUE obj = newobj_of0(klass, flags, wb_protected, GET_RACTOR(), alloc_size);
2560    return newobj_fill(obj, v1, v2, v3);
2564newobj_of_cr(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, int wb_protected, size_t alloc_size)
2566    VALUE obj = newobj_of0(klass, flags, wb_protected, cr, alloc_size);
2567    return newobj_fill(obj, v1, v2, v3);
2571rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
2574    return newobj_of(klass, flags, 0, 0, 0, FALSE, size);
2578rb_wb_protected_newobj_of(VALUE klass, VALUE flags, size_t size)
2581    return newobj_of(klass, flags, 0, 0, 0, TRUE, size);
2588    return newobj_of_cr(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
2596    return newobj_of(0, T_NONE, 0, 0, 0, FALSE, sizeof(RVALUE));
2603 st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2608 rb_init_iv_list(obj);
2617#define UNEXPECTED_NODE(func) \
2618 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
2619 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
2622rb_imemo_name(enum imemo_type type)
2626#define IMEMO_NAME(x) case imemo_##x: return #x;
2630 IMEMO_NAME(throw_data);
2637 IMEMO_NAME(parser_strterm);
2638 IMEMO_NAME(callinfo);
2639 IMEMO_NAME(callcache);
2640 IMEMO_NAME(constcache);
2649rb_imemo_new(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2651    size_t size = sizeof(RVALUE);
2653    return newobj_of(v0, flags, v1, v2, v3, TRUE, size);
2657rb_imemo_tmpbuf_new(VALUE v1, VALUE v2, VALUE v3, VALUE v0)
2659    size_t size = sizeof(RVALUE);
2661    return newobj_of(v0, flags, v1, v2, v3, FALSE, size);
2665rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(void *buf, size_t cnt)
2667 return rb_imemo_tmpbuf_new((VALUE)buf, 0, (VALUE)cnt, 0);
2673    return (rb_imemo_tmpbuf_t *)rb_imemo_tmpbuf_new((VALUE)buf, (VALUE)old_heap, (VALUE)cnt, 0);
2677imemo_memsize(VALUE obj)
2680 switch (imemo_type(obj)) {
2682        size += sizeof(RANY(obj)->as.imemo.ment.def);
2685        size += rb_iseq_memsize((rb_iseq_t *)obj);
2688        size += RANY(obj)->as.imemo.env.env_size * sizeof(VALUE);
2691        size += RANY(obj)->as.imemo.alloc.cnt * sizeof(VALUE);
2694 size += rb_ast_memsize(&RANY(obj)->as.imemo.ast);
2698 case imemo_throw_data:
2701 case imemo_parser_strterm:
2712rb_imemo_new_debug(enum imemo_type type, VALUE v1, VALUE v2, VALUE v3, VALUE v0, const char *file, int line)
2714    VALUE memo = rb_imemo_new(type, v1, v2, v3, v0);
2715    fprintf(stderr, "memo %p (type: %d) @ %s:%d\n", (void *)memo, imemo_type(memo), file, line);
2721rb_class_allocate_instance(VALUE klass)
2723 st_table *index_tbl = RCLASS_IV_INDEX_TBL(klass);
2725    VALUE flags = T_OBJECT | ROBJECT_EMBED;
2730 rb_init_iv_list(obj);
2737rb_data_object_check(VALUE klass)
2739    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
2741#if RUBY_VERSION_SINCE(3, 2)
2742        RBIMPL_TODO("enable the warning at this release");
2743        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
2752    if (klass) rb_data_object_check(klass);
2753    return newobj_of(klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, FALSE, sizeof(RVALUE));
2767    RBIMPL_NONNULL_ARG(type);
2768 if (klass) rb_data_object_check(klass);
2781rb_objspace_data_type_memsize(VALUE obj)
2786    if (ptr && type->function.dsize) {
2787 return type->function.dsize(ptr);
2794rb_objspace_data_type_name(VALUE obj)
2804PUREFUNC(static inline int is_pointer_to_heap(rb_objspace_t *objspace, void *ptr);)
2808 register RVALUE *p = RANY(ptr);
2810 register size_t hi, lo, mid;
2812 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2814    if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2815    RB_DEBUG_COUNTER_INC(gc_isptr_range);
2817    if ((VALUE)p % sizeof(RVALUE) != 0) return FALSE;
2818 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2822 hi = heap_allocated_pages;
2824 mid = (lo + hi) / 2;
2825 page = heap_pages_sorted[mid];
2826 if (page->start <= p) {
2827 if ((uintptr_t)p < ((uintptr_t)page->start + (page->total_slots * page->slot_size))) {
2828 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2830 if (page->flags.in_tomb) {
2834                if ((NUM_IN_PAGE(p) * sizeof(RVALUE)) % page->slot_size != 0) return FALSE;
2848static enum rb_id_table_iterator_result
2849free_const_entry_i(VALUE value, void *data)
2853 return ID_TABLE_CONTINUE;
2859 rb_id_table_foreach_values(tbl, free_const_entry_i, 0);
2860 rb_id_table_free(tbl);
2864free_iv_index_tbl_free_i(st_data_t key, st_data_t value, st_data_t data)
2866    xfree((void *)value);
2871iv_index_tbl_free(struct st_table *tbl)
2873 st_foreach(tbl, free_iv_index_tbl_free_i, 0);
2883    for (int i=0; i<ccs->len; i++) {
2886        void *ptr = asan_poisoned_object_p((VALUE)cc);
2887        asan_unpoison_object((VALUE)cc, false);
2889        if (is_pointer_to_heap(objspace, (void *)cc) &&
2890 IMEMO_TYPE_P(cc, imemo_callcache) &&
2891 cc->klass == klass) {
2896 asan_poison_object((VALUE)cc);
2901 asan_poison_object((VALUE)cc);
2904 vm_cc_invalidate(cc);
2914 RB_DEBUG_COUNTER_INC(ccs_free);
2915    vm_ccs_free(ccs, TRUE, NULL, Qundef);
2924static enum rb_id_table_iterator_result
2925cc_table_mark_i(ID id, VALUE ccs_ptr, void *data_ptr)
2929    VM_ASSERT(vm_ccs_p(ccs));
2930    VM_ASSERT(id == ccs->cme->called_id);
2932 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2933 rb_vm_ccs_free(ccs);
2934 return ID_TABLE_DELETE;
2937 gc_mark(data->objspace, (VALUE)ccs->cme);
2939    for (int i=0; i<ccs->len; i++) {
2940 VM_ASSERT(data->klass == ccs->entries[i].cc->klass);
2941 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
2943 gc_mark(data->objspace, (VALUE)ccs->entries[i].ci);
2944 gc_mark(data->objspace, (VALUE)ccs->entries[i].cc);
2946 return ID_TABLE_CONTINUE;
2953 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2956 .objspace = objspace,
2959 rb_id_table_foreach(cc_tbl, cc_table_mark_i, &data);
2963static enum rb_id_table_iterator_result
2964cc_table_free_i(VALUE ccs_ptr, void *data_ptr)
2968 VM_ASSERT(vm_ccs_p(ccs));
2969 vm_ccs_free(ccs, data->alive, data->objspace, data->klass);
2970 return ID_TABLE_CONTINUE;
2974cc_table_free(rb_objspace_t *objspace, VALUE klass, bool alive)
2976 struct rb_id_table *cc_tbl = RCLASS_CC_TBL(klass);
2980 .objspace = objspace,
2984 rb_id_table_foreach_values(cc_tbl, cc_table_free_i, &data);
2985 rb_id_table_free(cc_tbl);
2989static enum rb_id_table_iterator_result
2990cvar_table_free_i(VALUE value, void * ctx)
2992    xfree((void *) value);
2993 return ID_TABLE_CONTINUE;
2997rb_cc_table_free(VALUE klass)
3003make_zombie(rb_objspace_t *objspace, VALUE obj, void (*dfree)(void *), void *data)
3005 struct RZombie *zombie = RZOMBIE(obj);
3007 zombie->dfree = dfree;
3008 zombie->data = data;
3009 zombie->next = heap_pages_deferred_final;
3010    heap_pages_deferred_final = (VALUE)zombie;
3012 struct heap_page *page = GET_HEAP_PAGE(obj);
3013 page->final_slots++;
3014 heap_pages_final_slots++;
3020 rb_io_t *fptr = RANY(obj)->as.file.fptr;
3021 make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
3027 ASSERT_vm_locking();
3028    st_data_t o = (st_data_t)obj, id;
3033    if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
3035        st_delete(objspace->id_to_obj_tbl, &id, NULL);
3038        rb_bug("Object ID seen, but not in mapping table: %s\n", obj_info(obj));
3045 RB_DEBUG_COUNTER_INC(obj_free);
3055        rb_bug("obj_free() called for broken object");
3067 obj_free_object_id(objspace, obj);
3070 if (RVALUE_WB_UNPROTECTED(obj)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
3072#if RGENGC_CHECK_MODE
3073#define CHECK(x) if (x(obj) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", obj_info(obj))
3074 CHECK(RVALUE_WB_UNPROTECTED);
3075 CHECK(RVALUE_MARKED);
3076 CHECK(RVALUE_MARKING);
3077 CHECK(RVALUE_UNCOLLECTIBLE);
3083 if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
3084 RB_DEBUG_COUNTER_INC(obj_obj_embed);
3086 else if (ROBJ_TRANSIENT_P(obj)) {
3087 RB_DEBUG_COUNTER_INC(obj_obj_transient);
3090 xfree(RANY(obj)->as.object.as.heap.ivptr);
3091 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
3096 rb_id_table_free(RCLASS_M_TBL(obj));
3097 cc_table_free(objspace, obj, FALSE);
3098 if (RCLASS_IV_TBL(obj)) {
3099 st_free_table(RCLASS_IV_TBL(obj));
3101 if (RCLASS_CONST_TBL(obj)) {
3102 rb_free_const_table(RCLASS_CONST_TBL(obj));
3104 if (RCLASS_IV_INDEX_TBL(obj)) {
3105 iv_index_tbl_free(RCLASS_IV_INDEX_TBL(obj));
3107 if (RCLASS_CVC_TBL(obj)) {
3108 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
3109 rb_id_table_free(RCLASS_CVC_TBL(obj));
3111 rb_class_remove_subclass_head(obj);
3112 rb_class_remove_from_module_subclasses(obj);
3113 rb_class_remove_from_super_subclasses(obj);
3115 if (RCLASS_EXT(obj))
3116 xfree(RCLASS_EXT(obj));
3129#if USE_DEBUG_COUNTER
3132 RB_DEBUG_COUNTER_INC(obj_hash_empty);
3135 RB_DEBUG_COUNTER_INC(obj_hash_1);
3138 RB_DEBUG_COUNTER_INC(obj_hash_2);
3141 RB_DEBUG_COUNTER_INC(obj_hash_3);
3144 RB_DEBUG_COUNTER_INC(obj_hash_4);
3150 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
3154 RB_DEBUG_COUNTER_INC(obj_hash_g8);
3157 if (RHASH_AR_TABLE_P(obj)) {
3158 if (RHASH_AR_TABLE(obj) == NULL) {
3159 RB_DEBUG_COUNTER_INC(obj_hash_null);
3162 RB_DEBUG_COUNTER_INC(obj_hash_ar);
3166 RB_DEBUG_COUNTER_INC(obj_hash_st);
3173 if (RHASH_TRANSIENT_P(obj)) {
3174 RB_DEBUG_COUNTER_INC(obj_hash_transient);
3182 GC_ASSERT(RHASH_ST_TABLE_P(obj));
3183 st_free_table(RHASH(obj)->as.st);
3187 if (RANY(obj)->as.regexp.ptr) {
3188 onig_free(RANY(obj)->as.regexp.ptr);
3189 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
    int free_immediately = FALSE;
    void (*dfree)(void *);

    free_immediately = (RANY(obj)->as.typeddata.type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
    dfree = RANY(obj)->as.typeddata.type->function.dfree;
    if (0 && free_immediately == 0) {
        fprintf(stderr, "not immediate -> %s\n", RANY(obj)->as.typeddata.type->wrap_struct_name);

    dfree = RANY(obj)->as.data.dfree;

    RB_DEBUG_COUNTER_INC(obj_data_xfree);
    else if (free_immediately) {
        RB_DEBUG_COUNTER_INC(obj_data_imm_free);

    make_zombie(objspace, obj, dfree, data);
    RB_DEBUG_COUNTER_INC(obj_data_zombie);

    RB_DEBUG_COUNTER_INC(obj_data_empty);
    if (RANY(obj)->as.match.rmatch) {
        struct rmatch *rm = RANY(obj)->as.match.rmatch;
#if USE_DEBUG_COUNTER
        if (rm->regs.num_regs >= 8) {
            RB_DEBUG_COUNTER_INC(obj_match_ge8);
        else if (rm->regs.num_regs >= 4) {
            RB_DEBUG_COUNTER_INC(obj_match_ge4);
        else if (rm->regs.num_regs >= 1) {
            RB_DEBUG_COUNTER_INC(obj_match_under4);
        onig_region_free(&rm->regs, 0);
        RB_DEBUG_COUNTER_INC(obj_match_ptr);
3253 if (RANY(obj)->as.file.fptr) {
3254 make_io_zombie(objspace, obj);
3255 RB_DEBUG_COUNTER_INC(obj_file_ptr);
3260 RB_DEBUG_COUNTER_INC(obj_rational);
3263 RB_DEBUG_COUNTER_INC(obj_complex);
3269 if (RICLASS_OWNS_M_TBL_P(obj)) {
3271 rb_id_table_free(RCLASS_M_TBL(obj));
3273 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
3274 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
3276 rb_class_remove_subclass_head(obj);
3277 cc_table_free(objspace, obj, FALSE);
3278 rb_class_remove_from_module_subclasses(obj);
3279 rb_class_remove_from_super_subclasses(obj);
3281 xfree(RCLASS_EXT(obj));
3284 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
3288 RB_DEBUG_COUNTER_INC(obj_float);
3292 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
3293 xfree(BIGNUM_DIGITS(obj));
3294 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
3297 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
3302 UNEXPECTED_NODE(obj_free);
    if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
        RANY(obj)->as.rstruct.as.heap.ptr == NULL) {
        RB_DEBUG_COUNTER_INC(obj_struct_embed);
    else if (RSTRUCT_TRANSIENT_P(obj)) {
        RB_DEBUG_COUNTER_INC(obj_struct_transient);
        xfree((void *)RANY(obj)->as.rstruct.as.heap.ptr);
        RB_DEBUG_COUNTER_INC(obj_struct_ptr);
3321 rb_gc_free_dsymbol(obj);
3322 RB_DEBUG_COUNTER_INC(obj_symbol);
3327 switch (imemo_type(obj)) {
3329 rb_free_method_entry(&RANY(obj)->as.imemo.ment);
3330 RB_DEBUG_COUNTER_INC(obj_imemo_ment);
3333 rb_iseq_free(&RANY(obj)->as.imemo.iseq);
3334 RB_DEBUG_COUNTER_INC(obj_imemo_iseq);
3337 GC_ASSERT(VM_ENV_ESCAPED_P(RANY(obj)->as.imemo.env.ep));
3338 xfree((VALUE *)RANY(obj)->as.imemo.env.env);
3339 RB_DEBUG_COUNTER_INC(obj_imemo_env);
3342 xfree(RANY(obj)->as.imemo.alloc.ptr);
3343 RB_DEBUG_COUNTER_INC(obj_imemo_tmpbuf);
3346 rb_ast_free(&RANY(obj)->as.imemo.ast);
3347 RB_DEBUG_COUNTER_INC(obj_imemo_ast);
3350 RB_DEBUG_COUNTER_INC(obj_imemo_cref);
3353 RB_DEBUG_COUNTER_INC(obj_imemo_svar);
3355 case imemo_throw_data:
3356 RB_DEBUG_COUNTER_INC(obj_imemo_throw_data);
3359 RB_DEBUG_COUNTER_INC(obj_imemo_ifunc);
3362 RB_DEBUG_COUNTER_INC(obj_imemo_memo);
3364 case imemo_parser_strterm:
3365 RB_DEBUG_COUNTER_INC(obj_imemo_parser_strterm);
3367 case imemo_callinfo:
3368 RB_DEBUG_COUNTER_INC(obj_imemo_callinfo);
3370 case imemo_callcache:
3371 RB_DEBUG_COUNTER_INC(obj_imemo_callcache);
3373 case imemo_constcache:
3374 RB_DEBUG_COUNTER_INC(obj_imemo_constcache);
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,

    make_zombie(objspace, obj, 0, 0);
3394#define OBJ_ID_INCREMENT (sizeof(RVALUE) / 2)
3395#define OBJ_ID_INITIAL (OBJ_ID_INCREMENT * 2)
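/*
 * [editorial note] Monotonic object IDs are handed out in steps of
 * OBJ_ID_INCREMENT starting at OBJ_ID_INITIAL. As a worked example, assuming
 * a typical 64-bit build where sizeof(RVALUE) is 40 bytes:
 *
 *   OBJ_ID_INCREMENT == 40 / 2 == 20
 *   OBJ_ID_INITIAL   == 20 * 2 == 40
 *
 * so cached_object_id() below would hand out 40, 60, 80, ... in allocation
 * order.
 */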
3398object_id_cmp(st_data_t x, st_data_t y)
3400 if (RB_BIGNUM_TYPE_P(x)) {
3409object_id_hash(st_data_t n)
3411 if (RB_BIGNUM_TYPE_P(n)) {
3415 return st_numhash(n);
3418static const struct st_hash_type object_id_hash_type = {
3428#if defined(HAVE_MMAP) && !HAVE_CONST_PAGE_SIZE && !defined(PAGE_MAX_SIZE)
3432 use_mmap_aligned_alloc = PAGE_SIZE <= HEAP_PAGE_SIZE;
3433# elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
3435 use_mmap_aligned_alloc = sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE;
3438 use_mmap_aligned_alloc = FALSE;
    objspace->next_object_id = INT2FIX(OBJ_ID_INITIAL);
    objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
    objspace->obj_to_id_tbl = st_init_numtable();

#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;

    heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT);

    for (int i = 1; i < SIZE_POOL_COUNT; i++) {
        int multiple = size_pool->slot_size / sizeof(RVALUE);
        size_pool->allocatable_pages = gc_params.heap_init_slots * multiple / HEAP_PAGE_OBJ_LIMIT;
    heap_pages_expand_sorted(objspace);

    init_mark_stack(&objspace->mark_stack);
    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
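/*
 * [editorial note] Objspace initialization order, as visible above: the
 * object-ID tables, the first eden pages for size pool 0, allocatable page
 * counts for the remaining size pools, the sorted page array, the mark
 * stack, the profiling clock, and finally the finalizer table.
 */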
3471 gc_stress_set(objspace, ruby_initial_gc_stress);
typedef int each_obj_callback(void *, void *, size_t, void *);

static void objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected);
static void objspace_reachable_objects_from_root(rb_objspace_t *, void (func)(const char *, VALUE, void *), void *);
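/*
 * [editorial sketch] Example of an each_obj_callback, assuming the contract
 * visible below: the callback receives [start, end) plus the slot stride for
 * one eden page, and a nonzero return appears to stop iteration of that
 * heap. The callback name and the slot-counting body are illustrative only.
 *
 *   static int
 *   count_slots_i(void *start, void *end, size_t stride, void *data)
 *   {
 *       size_t *count = data;
 *       for (uintptr_t p = (uintptr_t)start; p < (uintptr_t)end; p += stride) {
 *           (*count)++;   // every slot, including unused (T_NONE) ones
 *       }
 *       return 0;         // keep iterating
 *   }
 *
 *   size_t n = 0;
 *   rb_objspace_each_objects(count_slots_i, &n);
 */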
3481 bool reenable_incremental;
3483 each_obj_callback *callback;
3486 struct heap_page **pages[SIZE_POOL_COUNT];
3487 size_t pages_counts[SIZE_POOL_COUNT];
objspace_each_objects_ensure(VALUE arg)
    if (data->reenable_incremental) {
        objspace->flags.dont_incremental = FALSE;

    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        struct heap_page **pages = data->pages[i];

objspace_each_objects_try(VALUE arg)
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        size_t size = size_mul_or_raise(SIZE_POOL_EDEN_HEAP(size_pool)->total_pages,
                                        sizeof(struct heap_page *), rb_eRuntimeError);
        struct heap_page **pages = malloc(size);

        size_t pages_count = 0;
        list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
            pages[pages_count] = page;
        data->pages[i] = pages;
        data->pages_counts[i] = pages_count;
        GC_ASSERT(pages_count == SIZE_POOL_EDEN_HEAP(size_pool)->total_pages);
    for (int i = 0; i < SIZE_POOL_COUNT; i++) {
        size_t pages_count = data->pages_counts[i];
        struct heap_page **pages = data->pages[i];

        struct heap_page *page = list_top(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, struct heap_page, page_node);
        for (size_t i = 0; i < pages_count; i++) {
            if (page == NULL) break;
            if (pages[i] != page) continue;

            uintptr_t pstart = (uintptr_t)page->start;
            uintptr_t pend = pstart + (page->total_slots * size_pool->slot_size);

            if ((*data->callback)((void *)pstart, (void *)pend, size_pool->slot_size, data->data)) {

            page = list_next(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node);
rb_objspace_each_objects(each_obj_callback *callback, void *data)
    objspace_each_objects(&rb_objspace, callback, data, TRUE);

objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
    bool reenable_incremental = FALSE;
    reenable_incremental = !objspace->flags.dont_incremental;
    objspace->flags.dont_incremental = TRUE;

        .objspace = objspace,
        .reenable_incremental = reenable_incremental,
        .callback = callback,
        .pages_counts = {0},

rb_objspace_each_objects_without_setup(each_obj_callback *callback, void *data)
    objspace_each_objects(&rb_objspace, callback, data, FALSE);
internal_object_p(VALUE obj)
    void *ptr = __asan_region_is_poisoned(p, SIZEOF_VALUE);
    asan_unpoison_object(obj, false);
    bool used_p = p->as.basic.flags;

    UNEXPECTED_NODE(internal_object_p);

    if (!p->as.basic.klass) break;
    return rb_singleton_class_internal_p(obj);

    if (!p->as.basic.klass) break;

    if (ptr || ! used_p) {
        asan_poison_object(obj);
rb_objspace_internal_object_p(VALUE obj)
    return internal_object_p(obj);

os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {

    rb_objspace_each_objects(os_obj_of_i, &oes);
os_each_obj(int argc, VALUE *argv, VALUE os)
    return os_obj_of(of);

undefine_final(VALUE os, VALUE obj)
    st_data_t data = obj;
    st_delete(finalizer_table, &data, 0);

should_be_callable(VALUE block)
    rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
             rb_obj_class(block));

should_be_finalizable(VALUE obj)
    rb_raise(rb_eArgError, "cannot define finalizer for %s",

define_final(int argc, VALUE *argv, VALUE os)
    should_be_finalizable(obj);
    should_be_callable(block);
    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    return define_final0(obj, block);
define_final0(VALUE obj, VALUE block)
    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
    for (i = 0; i < len; i++) {
        if (rb_equal(recv, block)) {
    RBASIC_CLEAR_CLASS(table);
    st_add_direct(finalizer_table, obj, table);

    should_be_finalizable(obj);
    should_be_callable(block);
    return define_final0(obj, block);

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;
        st_insert(finalizer_table, dest, table);
run_single_final(VALUE cmd, VALUE objid)
    VALUE errinfo = ec->errinfo;
    rb_warn("Exception in finalizer %+"PRIsVALUE, final);
    rb_ec_error_print(ec, errinfo);

run_finalizer(rb_objspace_t *objspace, VALUE obj, VALUE table)
    enum ruby_tag_type state;

#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;

    state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        warn_exception_in_finalizer(ec, ATOMIC_VALUE_EXCHANGE(saved.final, Qundef));
    for (i = saved.finished;
         saved.finished = ++i) {
        run_single_final(saved.final = RARRAY_AREF(table, i), saved.objid);
#undef RESTORE_FINALIZER
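/*
 * [editorial note] run_finalizer() saves the control frame and errinfo, runs
 * each finalizer entry under EC_EXEC_TAG(), and on an exception warns via
 * warn_exception_in_finalizer() and resumes from saved.finished, so one
 * failing finalizer does not stop the remaining entries in the table.
 * RESTORE_FINALIZER() puts cfp/errinfo back afterwards.
 */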
    st_data_t key, table;

    if (RZOMBIE(zombie)->dfree) {
        RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);

    key = (st_data_t)zombie;
    if (st_delete(finalizer_table, &key, &table)) {
        run_finalizer(objspace, zombie, (VALUE)table);

    asan_unpoison_object(zombie, false);
    next_zombie = RZOMBIE(zombie)->next;
    page = GET_HEAP_PAGE(zombie);
    run_final(objspace, zombie);
    obj_free_object_id(objspace, zombie);

    GC_ASSERT(heap_pages_final_slots > 0);
    GC_ASSERT(page->final_slots > 0);
    heap_pages_final_slots--;
    page->final_slots--;
    heap_page_add_freeobj(objspace, page, zombie);
    objspace->profile.total_freed_objects++;
    zombie = next_zombie;

    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
    while ((zombie = ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
        finalize_list(objspace, zombie);
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;

gc_finalize_deferred(void *dmy)
    if (ATOMIC_EXCHANGE(finalizing, 1)) return;
    finalize_deferred(objspace);
    ATOMIC_SET(finalizing, 0);

    rb_bug("gc_finalize_deferred_register: can't register finalizer.");
4101force_chain_object(st_data_t key, st_data_t val, st_data_t arg)
4112bool rb_obj_is_main_ractor(VALUE gv);
4119#if RGENGC_CHECK_MODE >= 2
4120 gc_verify_internal_consistency(objspace);
4124 if (ATOMIC_EXCHANGE(finalizing, 1))
return;
4127 finalize_deferred(objspace);
4128 GC_ASSERT(heap_pages_deferred_final == 0);
4132 objspace->flags.dont_incremental = 1;
4135 while (finalizer_table->num_entries) {
4137 st_foreach(finalizer_table, force_chain_object, (st_data_t)&list);
4140 st_data_t obj = (st_data_t)curr->obj;
4141 run_finalizer(objspace, curr->obj, curr->table);
4142 st_delete(finalizer_table, &obj, 0);
4152 unsigned int lock_lev;
4153 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
    for (i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (; p < pend; p += stride) {
            VALUE vp = (VALUE)p;
            void *poisoned = asan_poisoned_object_p(vp);
            asan_unpoison_object(vp, false);

            if (!DATA_PTR(p) || !RANY(p)->as.data.dfree) break;
            if (rb_obj_is_thread(vp)) break;
            if (rb_obj_is_mutex(vp)) break;
            if (rb_obj_is_main_ractor(vp)) break;

            RDATA(p)->dfree = RANY(p)->as.typeddata.type->function.dfree;
            RANY(p)->as.free.flags = 0;

            else if (RANY(p)->as.data.dfree) {
                make_zombie(objspace, vp, RANY(p)->as.data.dfree, RANY(p)->as.data.data);
            if (RANY(p)->as.file.fptr) {
                make_io_zombie(objspace, vp);

            asan_poison_object(vp);

    gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);

    if (heap_pages_deferred_final) {
        finalize_list(objspace, heap_pages_deferred_final);

    st_free_table(finalizer_table);
    finalizer_table = 0;
    ATOMIC_SET(finalizing, 0);
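/*
 * [editorial note] Shutdown sequence of rb_objspace_call_finalizer(), as far
 * as it is visible here: run deferred finalizers, force the remaining
 * finalizer_table entries, then scan every heap page and zombify T_DATA and
 * T_FILE objects that still need dfree/fptr cleanup, skipping threads,
 * mutexes and the main ractor. A final finalize_list() drains whatever the
 * scan queued before the finalizer table is freed.
 */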
4213 struct heap_page *page = GET_HEAP_PAGE(ptr);
4214 return page->flags.before_sweep ? FALSE : TRUE;
4221 if (!is_lazy_sweeping(objspace) ||
4222 is_swept_object(objspace, ptr) ||
4223 MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(ptr), ptr)) {
4244 if (!is_garbage_object(objspace, ptr)) {
4256 check_rvalue_consistency(obj);
4261rb_objspace_markable_object_p(VALUE obj)
4264 return is_markable_object(objspace, obj) && is_live_object(objspace, obj);
4268rb_objspace_garbage_object_p(VALUE obj)
4271 return is_garbage_object(objspace, obj);
    if (st_lookup(objspace->id_to_obj_tbl, objid, &orig)) {

#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)

    objid = rb_to_int(objid);
    if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
        ptr = NUM2PTR(objid);
        if (FIXNUM_P(ptr)) return (VALUE)ptr;
        if (FLONUM_P(ptr)) return (VALUE)ptr;

        ptr = obj_id_to_ref(objid);
        if ((ptr % sizeof(RVALUE)) == (4 << 2)) {
            ID symid = ptr / sizeof(RVALUE);
            rb_raise(rb_eRangeError, "%p is not symbol id value", p0);

    if ((orig = id2ref_obj_tbl(objspace, objid)) != Qundef &&
        is_live_object(objspace, orig)) {

        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));

    if (rb_int_ge(objid, objspace->next_object_id)) {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));

        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_int2str(objid, 10));

os_id2ref(VALUE os, VALUE objid)
    return id2ref(objid);
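/*
 * [editorial note] id2ref(): IDs of immediates (fixnums, flonums) decode
 * straight from the numeric value; heap objects are looked up in
 * id_to_obj_tbl and must still be live. Out-of-range, recycled and (under
 * multiple Ractors) unshareable IDs raise RangeError with the messages
 * shown above.
 */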
rb_find_object_id(VALUE obj, VALUE (*get_heap_object_id)(VALUE))
#if SIZEOF_LONG == SIZEOF_VOIDP
    return get_heap_object_id(obj);

cached_object_id(VALUE obj)
    if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &id)) {

    id = objspace->next_object_id;
    objspace->next_object_id = rb_int_plus(id, INT2FIX(OBJ_ID_INCREMENT));

    VALUE already_disabled = rb_gc_disable_no_rest();
    st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
    st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
    if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);

nonspecial_obj_id_(VALUE obj)
    return nonspecial_obj_id(obj);

    return rb_find_object_id(obj, nonspecial_obj_id_);

    return rb_find_object_id(obj, cached_object_id);
static enum rb_id_table_iterator_result
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
    size_t *total_size = data_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
    return ID_TABLE_CONTINUE;

    size_t total = rb_id_table_memsize(cc_table);
    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
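/*
 * [editorial note] cc_table_memsize() sums rb_id_table_memsize() for the
 * table itself plus, per callcache set, sizeof(*ccs) and its entries array
 * (sizeof(ccs->entries[0]) * ccs->capa), via cc_table_memsize_i above.
 */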
4499obj_memsize_of(VALUE obj,
int use_all_types)
4508 size += rb_generic_ivar_memsize(obj);
4513 if (!(
RBASIC(obj)->flags & ROBJECT_EMBED)) {
4519 if (RCLASS_EXT(obj)) {
4520 if (RCLASS_M_TBL(obj)) {
4521 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4523 if (RCLASS_IV_TBL(obj)) {
4524 size += st_memsize(RCLASS_IV_TBL(obj));
4526 if (RCLASS_CVC_TBL(obj)) {
4527 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
4529 if (RCLASS_IV_INDEX_TBL(obj)) {
4531 size += st_memsize(RCLASS_IV_INDEX_TBL(obj));
4533 if (RCLASS_EXT(obj)->iv_tbl) {
4534 size += st_memsize(RCLASS_EXT(obj)->iv_tbl);
4536 if (RCLASS_EXT(obj)->const_tbl) {
4537 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
4539 if (RCLASS_CC_TBL(obj)) {
4540 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4548 if (RICLASS_OWNS_M_TBL_P(obj)) {
4549 if (RCLASS_M_TBL(obj)) {
4550 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
4553 if (RCLASS_EXT(obj) && RCLASS_CC_TBL(obj)) {
4554 size += cc_table_memsize(RCLASS_CC_TBL(obj));
4558 size += rb_str_memsize(obj);
4561 size += rb_ary_memsize(obj);
4564 if (RHASH_AR_TABLE_P(obj)) {
4565 if (RHASH_AR_TABLE(obj) != NULL) {
4566 size_t rb_hash_ar_table_size(
void);
4567 size += rb_hash_ar_table_size();
4571 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
4572 size += st_memsize(RHASH_ST_TABLE(obj));
4581 if (use_all_types) size += rb_objspace_data_type_memsize(obj);
        size += onig_region_memsize(&rm->regs);
        size += sizeof(struct rmatch);

    if (RFILE(obj)->fptr) {
        size += rb_io_memsize(RFILE(obj)->fptr);

    size += imemo_memsize(obj);

    if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
        size += BIGNUM_LEN(obj) * sizeof(BDIGIT);

    UNEXPECTED_NODE(obj_memsize_of);

    if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
        RSTRUCT(obj)->as.heap.ptr) {

    rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",

    return size + GET_HEAP_PAGE(obj)->slot_size;
4637rb_obj_memsize_of(VALUE obj)
4639 return obj_memsize_of(obj, TRUE);
4643set_zero(st_data_t key, st_data_t val, st_data_t arg)
4645 VALUE k = (
VALUE)key;
4646 VALUE hash = (
VALUE)arg;
4652type_sym(
size_t type)
4655#define COUNT_TYPE(t) case (t): return ID2SYM(rb_intern(#t)); break;
count_objects(int argc, VALUE *argv, VALUE os)
    rb_raise(rb_eTypeError, "non-hash given");

    for (i = 0; i <= T_MASK; i++) {

    for (i = 0; i < heap_allocated_pages; i++) {
        struct heap_page *page = heap_pages_sorted[i];
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (;p < pend; p += stride) {
            VALUE vp = (VALUE)p;
            GC_ASSERT((NUM_IN_PAGE(vp) * sizeof(RVALUE)) % page->slot_size == 0);

            void *poisoned = asan_poisoned_object_p(vp);
            asan_unpoison_object(vp, false);
            if (RANY(p)->as.basic.flags) {
            asan_poison_object(vp);
        total += page->total_slots;

    rb_hash_stlike_foreach(hash, set_zero, hash);

    for (i = 0; i <= T_MASK; i++) {
        VALUE type = type_sym(i);
4796 size_t total_slots = 0;
4797 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
4799 total_slots += SIZE_POOL_EDEN_HEAP(size_pool)->total_slots;
4800 total_slots += SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
4808 return (objspace->total_allocated_objects - objspace->profile.total_freed_objects) - heap_pages_final_slots;
4814 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - heap_pages_final_slots;
4818gc_setup_mark_bits(
struct heap_page *page)
4821 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
4824static int gc_is_moveable_obj(
rb_objspace_t *objspace, VALUE obj);
4825static VALUE gc_move(
rb_objspace_t *objspace, VALUE scan, VALUE free,
size_t slot_size);
    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_NOACCESS, &old_protect)) {
    if (mprotect(body, HEAP_PAGE_SIZE, PROT_NONE)) {
        rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
    gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);

    if (!VirtualProtect(body, HEAP_PAGE_SIZE, PAGE_READWRITE, &old_protect)) {
    if (mprotect(body, HEAP_PAGE_SIZE, PROT_READ | PROT_WRITE)) {
        rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
    gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
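/*
 * [editorial note] lock_page_body()/unlock_page_body() above implement the
 * compaction read barrier at the page level: pages behind the compact cursor
 * are protected (PROT_NONE / PAGE_NOACCESS), and a stray access then faults
 * into read_barrier_handler() below, which unprotects the page, bumps
 * read_barrier_faults and invalidates the moved objects on it. The intent
 * appears to be that the mutator never observes a T_MOVED slot.
 */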
4868 objspace->rcompactor.considered_count_table[
BUILTIN_TYPE((VALUE)p)]++;
4870 if (gc_is_moveable_obj(objspace, (VALUE)p)) {
4872 objspace->rcompactor.moved_count_table[
BUILTIN_TYPE((VALUE)p)]++;
4873 objspace->rcompactor.total_moved++;
4875 bool from_freelist =
false;
4878 from_freelist =
true;
4881 gc_move(objspace, (VALUE)p, dest, page->slot_size);
4882 gc_pin(objspace, (VALUE)p);
4883 heap->compact_cursor_index = (
RVALUE *)p;
4884 if (from_freelist) {
4885 FL_SET((VALUE)p, FL_FROM_FREELIST);
4902 struct heap_page * cursor = heap->compact_cursor;
4904 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
4913 bits_t *mark_bits = cursor->mark_bits;
4914 bits_t *pin_bits = cursor->pinned_bits;
4917 if (heap->compact_cursor_index) {
4918 index = BITMAP_INDEX(heap->compact_cursor_index);
4919 p = heap->compact_cursor_index;
4920 GC_ASSERT(cursor == GET_HEAP_PAGE(p));
4927 bits_t bits = mark_bits[index] & ~pin_bits[index];
4929 bits >>= NUM_IN_PAGE(p);
4930 if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest))
return 1;
4933 p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start));
4936 p = cursor->start + (BITS_BITLENGTH - NUM_IN_PAGE(cursor->start)) + (BITS_BITLENGTH * index);
4941 for (
size_t i = index + 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4942 bits_t bits = mark_bits[i] & ~pin_bits[i];
4943 if (try_move_plane(objspace, heap, sweep_page, (uintptr_t)p, bits, dest))
return 1;
4944 p += BITS_BITLENGTH;
4953 next = list_prev(&heap->pages, cursor, page_node);
4956 lock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4958 heap->compact_cursor = next;
4959 heap->compact_cursor_index = 0;
4965 if (next == sweep_page) {
4976 struct heap_page *cursor = heap->compact_cursor;
4979 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
4980 cursor = list_next(&heap->pages, cursor, page_node);
read_barrier_handler(uintptr_t address)
    address -= address % sizeof(RVALUE);
    obj = (VALUE)address;

    unlock_page_body(objspace, GET_PAGE_BODY(obj));
    objspace->profile.read_barrier_faults++;
    invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));

static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
typedef void (*signal_handler)(int);
static signal_handler old_sigsegv_handler;

read_barrier_signal(EXCEPTION_POINTERS * info)
    if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
        return EXCEPTION_CONTINUE_EXECUTION;
    return EXCEPTION_CONTINUE_SEARCH;
5031uninstall_handlers(
void)
5033 signal(SIGSEGV, old_sigsegv_handler);
5034 SetUnhandledExceptionFilter(old_handler);
5038install_handlers(
void)
5041 old_sigsegv_handler = signal(SIGSEGV, NULL);
5044 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
5047static struct sigaction old_sigbus_handler;
5048static struct sigaction old_sigsegv_handler;
5051read_barrier_signal(
int sig,
siginfo_t * info,
void * data)
5054 struct sigaction prev_sigbus, prev_sigsegv;
5055 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
5056 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
5059 sigset_t set, prev_set;
5061 sigaddset(&set, SIGBUS);
5062 sigaddset(&set, SIGSEGV);
5063 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
5066 read_barrier_handler((uintptr_t)info->si_addr);
5070 sigaction(SIGSEGV, &prev_sigsegv, NULL);
5071 sigprocmask(SIG_SETMASK, &prev_set, NULL);
5075uninstall_handlers(
void)
5077 sigaction(SIGBUS, &old_sigbus_handler, NULL);
5078 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
5082install_handlers(
void)
5085 memset(&action, 0,
sizeof(
struct sigaction));
5086 sigemptyset(&action.sa_mask);
5087 action.sa_sigaction = read_barrier_signal;
5088 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
5090 sigaction(SIGBUS, &action, &old_sigbus_handler);
5091 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
5096revert_stack_objects(VALUE stack_obj,
void *ctx)
5104 invalidate_moved_page(objspace, GET_HEAP_PAGE(stack_obj));
5109revert_machine_stack_references(
rb_objspace_t *objspace, VALUE v)
5111 if (is_pointer_to_heap(objspace, (
void *)v)) {
5116 invalidate_moved_page(objspace, GET_HEAP_PAGE(v));
5127 rb_vm_t *vm = rb_ec_vm_ptr(ec);
5128 rb_vm_each_stack_value(vm, revert_stack_objects, (
void*)objspace);
5129 each_machine_stack_value(ec, revert_machine_stack_references);
5135 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5137 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5138 gc_unprotect_pages(objspace, heap);
5141 uninstall_handlers();
5148 check_stack_for_moved(objspace);
5150 gc_update_references(objspace);
5151 objspace->profile.compact_count++;
5153 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5155 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5156 heap->compact_cursor = NULL;
5157 heap->compact_cursor_index = 0;
5160 if (gc_prof_enabled(objspace)) {
5162 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
5164 objspace->flags.during_compacting = FALSE;
5177 struct heap_page * sweep_page = ctx->page;
5180 short slot_size = sweep_page->slot_size;
5181 short slot_bits = slot_size /
sizeof(
RVALUE);
5185 VALUE dest = (
VALUE)p;
5187 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest));
5188 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
5190 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(dest), dest);
5192 if (*finished_compacting) {
5199 (void)VALGRIND_MAKE_MEM_UNDEFINED((
void*)dest,
sizeof(
RVALUE));
5200 heap_page_add_freeobj(objspace, sweep_page, dest);
5206 if (!try_move(objspace, heap, sweep_page, dest)) {
5207 *finished_compacting =
true;
5208 (void)VALGRIND_MAKE_MEM_UNDEFINED((
void*)p,
sizeof(
RVALUE));
5209 gc_report(5, objspace,
"Quit compacting, couldn't find an object to move\n");
5216 heap_page_add_freeobj(objspace, sweep_page, dest);
5217 gc_report(3, objspace,
"page_sweep: %s is added to freelist\n", obj_info(dest));
5226 bitset >>= slot_bits;
5235 bool finished_compacting =
false;
5236 bits_t *mark_bits, *pin_bits;
5240 mark_bits = sweep_page->mark_bits;
5241 pin_bits = sweep_page->pinned_bits;
5243 p = (uintptr_t)sweep_page->start;
5245 struct heap_page * cursor = heap->compact_cursor;
5247 unlock_page_body(objspace, GET_PAGE_BODY(cursor->start));
5250 bitset = pin_bits[0] & ~mark_bits[0];
5251 bitset >>= NUM_IN_PAGE(p);
5252 gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5253 p += ((BITS_BITLENGTH - NUM_IN_PAGE(p)) *
sizeof(
RVALUE));
5255 for (
int i = 1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5257 bitset = pin_bits[i] & ~mark_bits[i];
5258 gc_fill_swept_plane(objspace, heap, (uintptr_t)p, bitset, &finished_compacting, ctx);
5259 p += ((BITS_BITLENGTH) *
sizeof(
RVALUE));
5262 lock_page_body(objspace, GET_PAGE_BODY(heap->compact_cursor->start));
5264 return finished_compacting;
5270 struct heap_page * sweep_page = ctx->page;
5271 short slot_size = sweep_page->slot_size;
5272 short slot_bits = slot_size /
sizeof(
RVALUE);
5273 GC_ASSERT(slot_bits > 0);
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % sizeof(RVALUE) == 0);

        asan_unpoison_object(vp, false);

        gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
#if RGENGC_CHECK_MODE
        if (!is_full_marking(objspace)) {
            if (RVALUE_OLD_P(vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
            if (rgengc_remembered_sweep(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);

        if (obj_free(objspace, vp)) {
            if (heap->compact_cursor) {
                MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);

            (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, sizeof(RVALUE));
            heap_page_add_freeobj(objspace, sweep_page, vp);
            gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));

        if (objspace->flags.during_compacting) {
            rb_bug("T_MOVED shouldn't be seen until compaction is finished\n");

        gc_report(3, objspace, "page_sweep: %s is added to freelist\n", obj_info(vp));
        if (FL_TEST(vp, FL_FROM_FREELIST)) {

        heap_page_add_freeobj(objspace, sweep_page, vp);

        if (heap->compact_cursor) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(vp), vp);

        bitset >>= slot_bits;
5347 struct heap_page *sweep_page = ctx->page;
5352 bits_t *bits, bitset;
    gc_report(2, objspace, "page_sweep: start.\n");

    if (heap->compact_cursor) {
        if (sweep_page == heap->compact_cursor) {
            gc_report(5, objspace, "Quit compacting, mark and compact cursor met\n");
            gc_compact_finish(objspace, size_pool, heap);

    asan_unpoison_memory_region(&sweep_page->freelist, sizeof(RVALUE*), false);
    sweep_page->freelist = NULL;
    asan_poison_memory_region(&sweep_page->freelist, sizeof(RVALUE*));

    sweep_page->flags.before_sweep = FALSE;
    sweep_page->free_slots = 0;

    p = sweep_page->start;
    bits = sweep_page->mark_bits;

    int page_rvalue_count = sweep_page->total_slots * (size_pool->slot_size / sizeof(RVALUE));
    int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
    if (out_of_range_bits != 0) {
        bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);

    bitset >>= NUM_IN_PAGE(p);
    gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5388 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5390 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5393 gc_sweep_plane(objspace, heap, (uintptr_t)p, bitset, ctx);
5395 p += BITS_BITLENGTH;
5398 if (heap->compact_cursor) {
5399 if (gc_fill_swept_page(objspace, heap, sweep_page, ctx)) {
5400 gc_compact_finish(objspace, size_pool, heap);
5404 if (!heap->compact_cursor) {
5405 gc_setup_mark_bits(sweep_page);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        record->removing_objects += ctx->final_slots + ctx->freed_slots;
        record->empty_objects += ctx->empty_slots;

    if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
                   sweep_page->total_slots,
                   ctx->freed_slots, ctx->empty_slots, ctx->final_slots);

    sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
    objspace->profile.total_freed_objects += ctx->freed_slots;

    if (heap_pages_deferred_final && !finalizing) {
        gc_finalize_deferred_register(objspace);

#if RGENGC_CHECK_MODE
    short freelist_len = 0;
    RVALUE *ptr = sweep_page->freelist;
        ptr = ptr->as.free.next;
    if (freelist_len != sweep_page->free_slots) {
        rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);

    gc_report(2, objspace, "page_sweep: end.\n");
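/*
 * [editorial note] gc_sweep_page() bookkeeping: freed and empty slots are
 * added to sweep_page->free_slots and the freed count to the profile;
 * deferred finalization is registered when zombies are pending; and under
 * RGENGC_CHECK_MODE the freelist is re-walked to verify its length matches
 * free_slots.
 */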
5450 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5451 if (!heap->free_pages && heap_increment(objspace, size_pool, heap) == FALSE) {
5453 size_pool_allocatable_pages_set(objspace, size_pool, 1);
5454 if (!heap_increment(objspace, size_pool, heap)) {
5463gc_mode_name(
enum gc_mode mode)
5466 case gc_mode_none:
return "none";
5467 case gc_mode_marking:
return "marking";
5468 case gc_mode_sweeping:
return "sweeping";
5469 default:
rb_bug(
"gc_mode_name: unknown mode: %d", (
int)mode);
5474gc_mode_transition(
rb_objspace_t *objspace,
enum gc_mode mode)
5476#if RGENGC_CHECK_MODE
5477 enum gc_mode prev_mode = gc_mode(objspace);
5478 switch (prev_mode) {
5479 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking);
break;
5480 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping);
break;
5481 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none);
break;
5484 if (0) fprintf(stderr,
"gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
5485 gc_mode_set(objspace, mode);
5492 asan_unpoison_memory_region(&page->freelist,
sizeof(
RVALUE*),
false);
5493 if (page->freelist) {
5494 RVALUE *p = page->freelist;
5495 asan_unpoison_object((VALUE)p,
false);
5496 while (p->as.free.next) {
5498 p = p->as.free.next;
5499 asan_poison_object((VALUE)prev);
5500 asan_unpoison_object((VALUE)p,
false);
5502 p->as.free.next = freelist;
5503 asan_poison_object((VALUE)p);
5506 page->freelist = freelist;
5508 asan_poison_memory_region(&page->freelist,
sizeof(
RVALUE*));
5515 heap->sweeping_page = list_top(&heap->pages,
struct heap_page, page_node);
5516 heap->free_pages = NULL;
5517#if GC_ENABLE_INCREMENTAL_MARK
5518 heap->pooled_pages = NULL;
5522#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
5523__attribute__((noinline))
5528 gc_mode_transition(objspace, gc_mode_sweeping);
5530#if GC_ENABLE_INCREMENTAL_MARK
5531 objspace->rincgc.pooled_slots = 0;
5534 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5537 gc_sweep_start_heap(objspace, SIZE_POOL_EDEN_HEAP(size_pool));
5541 list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
5542 rb_gc_ractor_newobj_cache_clear(&r->newobj_cache);
5550 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
5551 size_t total_slots = heap->total_slots + SIZE_POOL_TOMB_HEAP(size_pool)->total_slots;
5552 size_t total_pages = heap->total_pages + SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5553 size_t swept_slots = size_pool->freed_slots + size_pool->empty_slots;
5555 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5557 if (swept_slots < min_free_slots) {
5558 bool grow_heap = is_full_marking(objspace);
5560 if (!is_full_marking(objspace)) {
5562 bool is_growth_heap = size_pool->empty_slots == 0 ||
5563 size_pool->freed_slots > size_pool->empty_slots;
5565 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5568 else if (is_growth_heap) {
5569 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
5570 size_pool->force_major_gc_count++;
5575 size_t extend_page_count = heap_extend_pages(objspace, swept_slots, total_slots, total_pages);
5577 if (extend_page_count > size_pool->allocatable_pages) {
5578 size_pool_allocatable_pages_set(objspace, size_pool, extend_page_count);
5581 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5590 gc_report(1, objspace,
"gc_sweep_finish\n");
5592 gc_prof_set_heap_info(objspace);
5593 heap_pages_free_unused_pages(objspace);
5595 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5599 size_t tomb_pages = SIZE_POOL_TOMB_HEAP(size_pool)->total_pages;
5600 if (size_pool->allocatable_pages < tomb_pages) {
5601 size_pool->allocatable_pages = tomb_pages;
5605 size_pool->freed_slots = 0;
5606 size_pool->empty_slots = 0;
5608#if GC_ENABLE_INCREMENTAL_MARK
5609 if (!will_be_incremental_marking(objspace)) {
5610 rb_heap_t *eden_heap = SIZE_POOL_EDEN_HEAP(size_pool);
5611 struct heap_page *end_page = eden_heap->free_pages;
5613 while (end_page->free_next) end_page = end_page->free_next;
5614 end_page->free_next = eden_heap->pooled_pages;
5617 eden_heap->free_pages = eden_heap->pooled_pages;
5619 eden_heap->pooled_pages = NULL;
5620 objspace->rincgc.pooled_slots = 0;
5625 heap_pages_expand_sorted(objspace);
5628 gc_mode_transition(objspace, gc_mode_none);
5630#if RGENGC_CHECK_MODE >= 2
5631 gc_verify_internal_consistency(objspace);
5638 struct heap_page *sweep_page = heap->sweeping_page;
5639 int unlink_limit = 3;
5641#if GC_ENABLE_INCREMENTAL_MARK
5642 int swept_slots = 0;
5644 bool need_pool = TRUE;
5646 int need_pool = will_be_incremental_marking(objspace) ? TRUE : FALSE;
5649 gc_report(2, objspace,
"gc_sweep_step (need_pool: %d)\n", need_pool);
5651 gc_report(2, objspace,
"gc_sweep_step\n");
5654 if (sweep_page == NULL)
return FALSE;
5656#if GC_ENABLE_LAZY_SWEEP
5657 gc_prof_sweep_timer_start(objspace);
5661 RUBY_DEBUG_LOG(
"sweep_page:%p", (
void *)sweep_page);
5669 gc_sweep_page(objspace, size_pool, heap, &ctx);
5670 int free_slots = ctx.freed_slots + ctx.empty_slots;
5672 heap->sweeping_page = list_next(&heap->pages, sweep_page, page_node);
5674 if (sweep_page->final_slots + free_slots == sweep_page->total_slots &&
5675 heap_pages_freeable_pages > 0 &&
5677 heap_pages_freeable_pages--;
5680 heap_unlink_page(objspace, heap, sweep_page);
5681 heap_add_page(objspace, size_pool, SIZE_POOL_TOMB_HEAP(size_pool), sweep_page);
5683 else if (free_slots > 0) {
5685 size_pool->freed_slots += ctx.freed_slots;
5686 size_pool->empty_slots += ctx.empty_slots;
5689#if GC_ENABLE_INCREMENTAL_MARK
5691 heap_add_poolpage(objspace, heap, sweep_page);
5695 heap_add_freepage(heap, sweep_page);
5696 swept_slots += free_slots;
5697 if (swept_slots > 2048) {
5702 heap_add_freepage(heap, sweep_page);
5707 sweep_page->free_next = NULL;
5709 }
while ((sweep_page = heap->sweeping_page));
5711 if (!heap->sweeping_page) {
5713 gc_sweep_finish_size_pool(objspace, size_pool);
5716 if (!has_sweeping_pages(objspace)) {
5717 gc_sweep_finish(objspace);
5721#if GC_ENABLE_LAZY_SWEEP
5722 gc_prof_sweep_timer_stop(objspace);
5725 return heap->free_pages != NULL;
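/*
 * [editorial note] gc_sweep_step() is the lazy-sweep work unit: it sweeps
 * pages from heap->sweeping_page until a page with free slots is available,
 * returning completely empty pages to the tomb heap (bounded by
 * unlink_limit), pooling pages when incremental marking will follow, and
 * stopping early once roughly 2048 slots have been swept.
 */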
5731 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5734 while (SIZE_POOL_EDEN_HEAP(size_pool)->sweeping_page) {
5735 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5743 GC_ASSERT(dont_gc_val() == FALSE);
5744 if (!GC_ENABLE_LAZY_SWEEP)
return;
5746 unsigned int lock_lev;
5747 gc_enter(objspace, gc_enter_event_sweep_continue, &lock_lev);
5749 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5751 if (!gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool))) {
5754 if (size_pool == sweep_size_pool) {
5755 if (size_pool->allocatable_pages > 0) {
5756 heap_increment(objspace, size_pool, heap);
5760 gc_sweep_rest(objspace);
5768 gc_exit(objspace, gc_enter_event_sweep_continue, &lock_lev);
5777 VALUE forwarding_object = (
VALUE)p;
5781 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object));
5782 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5784 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
5786 bool from_freelist =
FL_TEST_RAW(forwarding_object, FL_FROM_FREELIST);
5789 gc_move(objspace,
object, forwarding_object, page->slot_size);
5792 struct heap_page *orig_page = GET_HEAP_PAGE(
object);
5793 orig_page->free_slots++;
5794 if (!from_freelist) {
5795 objspace->profile.total_freed_objects++;
5797 heap_page_add_freeobj(objspace, orig_page,
object);
5799 GC_ASSERT(MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(forwarding_object), forwarding_object));
5814 bits_t *mark_bits, *pin_bits;
5818 mark_bits = page->mark_bits;
5819 pin_bits = page->pinned_bits;
5824 bitset = pin_bits[0] & ~mark_bits[0];
5825 bitset >>= NUM_IN_PAGE(p);
5826 invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5827 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
5829 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
5832 bitset = pin_bits[i] & ~mark_bits[i];
5834 invalidate_moved_plane(objspace, page, (uintptr_t)p, bitset);
5835 p += BITS_BITLENGTH;
5844 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5845 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
5846 list_for_each(&heap->pages, page, page_node) {
5847 page->flags.before_sweep = TRUE;
5850 heap->compact_cursor = list_tail(&heap->pages,
struct heap_page, page_node);
5851 heap->compact_cursor_index = 0;
5854 if (gc_prof_enabled(objspace)) {
5856 record->moved_objects = objspace->rcompactor.total_moved;
5859 memset(objspace->rcompactor.considered_count_table, 0,
T_MASK *
sizeof(
size_t));
5860 memset(objspace->rcompactor.moved_count_table, 0,
T_MASK *
sizeof(
size_t));
5869 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
5871 gc_report(1, objspace,
"gc_sweep: immediate: %d\n", immediate_sweep);
5873 if (immediate_sweep) {
5874#if !GC_ENABLE_LAZY_SWEEP
5875 gc_prof_sweep_timer_start(objspace);
5877 gc_sweep_start(objspace);
5878 if (objspace->flags.during_compacting) {
5879 gc_compact_start(objspace);
5882 gc_sweep_rest(objspace);
5883#if !GC_ENABLE_LAZY_SWEEP
5884 gc_prof_sweep_timer_stop(objspace);
5889 gc_sweep_start(objspace);
5891 if (ruby_enable_autocompact && is_full_marking(objspace)) {
5892 gc_compact_start(objspace);
5895 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5896 list_for_each(&(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages), page, page_node) {
5897 page->flags.before_sweep = TRUE;
5902 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
5904 gc_sweep_step(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5910 gc_heap_prepare_minimum_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
5917stack_chunk_alloc(
void)
5931 return stack->chunk == NULL;
5937 size_t size = stack->index;
5938 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
5941 size += stack->limit;
5942 chunk = chunk->next;
5950 chunk->next = stack->cache;
5951 stack->cache = chunk;
5952 stack->cache_size++;
5960 if (stack->unused_cache_size > (stack->cache_size/2)) {
5961 chunk = stack->cache;
5962 stack->cache = stack->cache->next;
5963 stack->cache_size--;
5966 stack->unused_cache_size = stack->cache_size;
5974 GC_ASSERT(stack->index == stack->limit);
5976 if (stack->cache_size > 0) {
5977 next = stack->cache;
5978 stack->cache = stack->cache->next;
5979 stack->cache_size--;
5980 if (stack->unused_cache_size > stack->cache_size)
5981 stack->unused_cache_size = stack->cache_size;
5984 next = stack_chunk_alloc();
5986 next->next = stack->chunk;
5987 stack->chunk = next;
5996 prev = stack->chunk->next;
5997 GC_ASSERT(stack->index == 0);
5998 add_stack_chunk_cache(stack, stack->chunk);
5999 stack->chunk = prev;
6000 stack->index = stack->limit;
6008 while (chunk != NULL) {
6018 mark_stack_chunk_list_free(stack->chunk);
6024 mark_stack_chunk_list_free(stack->cache);
6025 stack->cache_size = 0;
6026 stack->unused_cache_size = 0;
6054 if (stack->index == stack->limit) {
6055 push_mark_stack_chunk(stack);
6057 stack->chunk->data[stack->index++] = data;
6067 rb_bug(
"push_mark_stack() called for broken object");
6071 UNEXPECTED_NODE(push_mark_stack);
6075 rb_bug(
"rb_gc_mark(): unknown data type 0x%x(%p) %s",
6077 is_pointer_to_heap(&
rb_objspace, (
void *)data) ?
"corrupted object" :
"non object");
6083 if (is_mark_stack_empty(stack)) {
6086 if (stack->index == 1) {
6087 *data = stack->chunk->data[--stack->index];
6088 pop_mark_stack_chunk(stack);
6091 *data = stack->chunk->data[--stack->index];
6102 stack->index = stack->limit = STACK_CHUNK_SIZE;
6104 for (i=0; i < 4; i++) {
6105 add_stack_chunk_cache(stack, stack_chunk_alloc());
6107 stack->unused_cache_size = stack->cache_size;
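/*
 * [editorial note] The mark stack is a linked list of fixed-size chunks
 * (STACK_CHUNK_SIZE slots each). Emptied chunks go to a small cache so
 * marking does not malloc/free constantly; init_mark_stack() pre-caches four
 * chunks, and push/pop add or drop chunks as the index reaches the limit.
 */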
6112#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
6114#define STACK_START (ec->machine.stack_start)
6115#define STACK_END (ec->machine.stack_end)
6116#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
6118#if STACK_GROW_DIRECTION < 0
6119# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
6120#elif STACK_GROW_DIRECTION > 0
6121# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
6123# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
6124 : (size_t)(STACK_END - STACK_START + 1))
6126#if !STACK_GROW_DIRECTION
6127int ruby_stack_grow_direction;
6129ruby_get_stack_grow_direction(
volatile VALUE *addr)
6132 SET_MACHINE_STACK_END(&end);
6134 if (end > addr)
return ruby_stack_grow_direction = 1;
6135 return ruby_stack_grow_direction = -1;
6144 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
6145 return STACK_LENGTH;
6148#define PREVENT_STACK_OVERFLOW 1
6149#ifndef PREVENT_STACK_OVERFLOW
6150#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
6151# define PREVENT_STACK_OVERFLOW 1
6153# define PREVENT_STACK_OVERFLOW 0
6156#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
6162 size_t length = STACK_LENGTH;
6163 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
6165 return length > maximum_length;
6168#define stack_check(ec, water_mark) FALSE
6171#define STACKFRAME_FOR_CALL_CFUNC 2048
6173MJIT_FUNC_EXPORTED
int
6176 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
6182 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
6185ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(
static void each_location(
rb_objspace_t *objspace,
register const VALUE *x,
register long n,
void (*cb)(
rb_objspace_t *, VALUE)));
6202 if (end <= start)
return;
6204 each_location(objspace, start, n, cb);
6210 gc_mark_locations(&
rb_objspace, start, end, gc_mark_maybe);
6214gc_mark_values(
rb_objspace_t *objspace,
long n,
const VALUE *values)
6218 for (i=0; i<n; i++) {
6219 gc_mark(objspace, values[i]);
6224rb_gc_mark_values(
long n,
const VALUE *values)
6229 for (i=0; i<n; i++) {
6230 gc_mark_and_pin(objspace, values[i]);
6235gc_mark_stack_values(
rb_objspace_t *objspace,
long n,
const VALUE *values)
6239 for (i=0; i<n; i++) {
6240 if (is_markable_object(objspace, values[i])) {
6241 gc_mark_and_pin(objspace, values[i]);
6247rb_gc_mark_vm_stack_values(
long n,
const VALUE *values)
6250 gc_mark_stack_values(objspace, n, values);
6254mark_value(st_data_t key, st_data_t value, st_data_t data)
6257 gc_mark(objspace, (VALUE)value);
6262mark_value_pin(st_data_t key, st_data_t value, st_data_t data)
6265 gc_mark_and_pin(objspace, (VALUE)value);
6272 if (!tbl || tbl->num_entries == 0)
return;
6273 st_foreach(tbl, mark_value, (st_data_t)objspace);
6279 if (!tbl || tbl->num_entries == 0)
return;
6280 st_foreach(tbl, mark_value_pin, (st_data_t)objspace);
6284mark_key(st_data_t key, st_data_t value, st_data_t data)
6287 gc_mark_and_pin(objspace, (VALUE)key);
6295 st_foreach(tbl, mark_key, (st_data_t)objspace);
6299pin_value(st_data_t key, st_data_t value, st_data_t data)
6302 gc_mark_and_pin(objspace, (VALUE)value);
6310 st_foreach(tbl, pin_value, (st_data_t)objspace);
6320mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
6324 gc_mark(objspace, (VALUE)key);
6325 gc_mark(objspace, (VALUE)value);
6330pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
6334 gc_mark_and_pin(objspace, (VALUE)key);
6335 gc_mark_and_pin(objspace, (VALUE)value);
6340pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
6344 gc_mark_and_pin(objspace, (VALUE)key);
6345 gc_mark(objspace, (VALUE)value);
6352 if (rb_hash_compare_by_id_p(hash)) {
6353 rb_hash_stlike_foreach(hash, pin_key_mark_value, (st_data_t)objspace);
6356 rb_hash_stlike_foreach(hash, mark_keyvalue, (st_data_t)objspace);
6359 if (RHASH_AR_TABLE_P(hash)) {
6360 if (LIKELY(during_gc) && RHASH_TRANSIENT_P(hash)) {
6361 rb_transient_heap_mark(hash, RHASH_AR_TABLE(hash));
6365 VM_ASSERT(!RHASH_TRANSIENT_P(hash));
6367 gc_mark(objspace, RHASH(hash)->ifnone);
6374 st_foreach(tbl, pin_key_pin_value, (st_data_t)objspace);
6388 gc_mark(objspace, me->owner);
6389 gc_mark(objspace, me->defined_class);
6392 switch (def->type) {
6393 case VM_METHOD_TYPE_ISEQ:
6394 if (def->body.iseq.
iseqptr) gc_mark(objspace, (VALUE)def->body.iseq.
iseqptr);
6395 gc_mark(objspace, (VALUE)def->body.iseq.
cref);
6397 if (def->iseq_overload && me->defined_class) {
6400 gc_mark_and_pin(objspace, (VALUE)me);
6403 case VM_METHOD_TYPE_ATTRSET:
6404 case VM_METHOD_TYPE_IVAR:
6405 gc_mark(objspace, def->body.attr.location);
6407 case VM_METHOD_TYPE_BMETHOD:
6408 gc_mark(objspace, def->body.bmethod.proc);
6409 if (def->body.bmethod.hooks) rb_hook_list_mark(def->body.bmethod.hooks);
6411 case VM_METHOD_TYPE_ALIAS:
6412 gc_mark(objspace, (VALUE)def->body.alias.original_me);
6414 case VM_METHOD_TYPE_REFINED:
6415 gc_mark(objspace, (VALUE)def->body.refined.orig_me);
6416 gc_mark(objspace, (VALUE)def->body.refined.owner);
6418 case VM_METHOD_TYPE_CFUNC:
6419 case VM_METHOD_TYPE_ZSUPER:
6420 case VM_METHOD_TYPE_MISSING:
6421 case VM_METHOD_TYPE_OPTIMIZED:
6422 case VM_METHOD_TYPE_UNDEF:
6423 case VM_METHOD_TYPE_NOTIMPLEMENTED:
6429static enum rb_id_table_iterator_result
6430mark_method_entry_i(VALUE me,
void *data)
6434 gc_mark(objspace, me);
6435 return ID_TABLE_CONTINUE;
6442 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
6446static enum rb_id_table_iterator_result
6447mark_const_entry_i(VALUE value,
void *data)
6452 gc_mark(objspace, ce->value);
6453 gc_mark(objspace, ce->file);
6454 return ID_TABLE_CONTINUE;
6461 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
6464#if STACK_GROW_DIRECTION < 0
6465#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
6466#elif STACK_GROW_DIRECTION > 0
6467#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
6469#define GET_STACK_BOUNDS(start, end, appendix) \
6470 ((STACK_END < STACK_START) ? \
6471 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
6475 const VALUE *stack_start,
const VALUE *stack_end,
void (*cb)(
rb_objspace_t *, VALUE));
6477#ifndef __EMSCRIPTEN__
6483 VALUE v[
sizeof(rb_jmp_buf) / (
sizeof(VALUE))];
6484 } save_regs_gc_mark;
6485 VALUE *stack_start, *stack_end;
6487 FLUSH_REGISTER_WINDOWS;
6488 memset(&save_regs_gc_mark, 0,
sizeof(save_regs_gc_mark));
6490 rb_setjmp(save_regs_gc_mark.j);
6496 GET_STACK_BOUNDS(stack_start, stack_end, 1);
6498 each_location(objspace, save_regs_gc_mark.v, numberof(save_regs_gc_mark.v), gc_mark_maybe);
6500 each_stack_location(objspace, ec, stack_start, stack_end, gc_mark_maybe);
6504static VALUE *rb_emscripten_stack_range_tmp[2];
6507rb_emscripten_mark_locations(
void *begin,
void *end)
6509 rb_emscripten_stack_range_tmp[0] = begin;
6510 rb_emscripten_stack_range_tmp[1] = end;
6516 emscripten_scan_stack(rb_emscripten_mark_locations);
6517 each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6519 emscripten_scan_registers(rb_emscripten_mark_locations);
6520 each_stack_location(objspace, ec, rb_emscripten_stack_range_tmp[0], rb_emscripten_stack_range_tmp[1], gc_mark_maybe);
6528 VALUE *stack_start, *stack_end;
6530 GET_STACK_BOUNDS(stack_start, stack_end, 0);
6531 each_stack_location(objspace, ec, stack_start, stack_end, cb);
6537 each_machine_stack_value(ec, gc_mark_maybe);
6542 const VALUE *stack_start,
const VALUE *stack_end,
void (*cb)(
rb_objspace_t *, VALUE))
6545 gc_mark_locations(objspace, stack_start, stack_end, cb);
6547#if defined(__mc68000__)
6548 gc_mark_locations(objspace,
6549 (VALUE*)((
char*)stack_start + 2),
6550 (VALUE*)((
char*)stack_end - 2), cb);
    (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        void *ptr = __asan_region_is_poisoned((void *)obj, SIZEOF_VALUE);
        asan_unpoison_object(obj, false);

        gc_mark_and_pin(objspace, obj);

        asan_poison_object(obj);
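/*
 * [editorial note] gc_mark_maybe() is the conservative path used for machine
 * stack and register scanning: a word is treated as a reference only if
 * is_pointer_to_heap() accepts it, and it is then marked *and pinned*, so
 * compaction cannot move an object that might merely look referenced.
 */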
6601 ASSERT_vm_locking();
6602 if (RVALUE_MARKED(obj))
return 0;
6603 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
6610 struct heap_page *page = GET_HEAP_PAGE(obj);
6611 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
6613 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
6614 page->flags.has_uncollectible_shady_objects = TRUE;
6615 MARK_IN_BITMAP(uncollectible_bits, obj);
6616 objspace->rgengc.uncollectible_wb_unprotected_objects++;
6618#if RGENGC_PROFILE > 0
6619 objspace->profile.total_remembered_shady_object_count++;
6620#if RGENGC_PROFILE >= 2
6621 objspace->profile.remembered_shady_object_count_types[
BUILTIN_TYPE(obj)]++;
6634 const VALUE old_parent = objspace->rgengc.parent_object;
6637 if (RVALUE_WB_UNPROTECTED(obj)) {
6638 if (gc_remember_unprotected(objspace, obj)) {
6639 gc_report(2, objspace,
"relation: (O->S) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6643 if (!RVALUE_OLD_P(obj)) {
6644 if (RVALUE_MARKED(obj)) {
6646 gc_report(2, objspace,
"relation: (O->unmarked Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6647 RVALUE_AGE_SET_OLD(objspace, obj);
6648 if (is_incremental_marking(objspace)) {
6649 if (!RVALUE_MARKING(obj)) {
6650 gc_grey(objspace, obj);
6654 rgengc_remember(objspace, obj);
6658 gc_report(2, objspace,
"relation: (O->Y) %s -> %s\n", obj_info(old_parent), obj_info(obj));
6659 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
6665 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
6671#if RGENGC_CHECK_MODE
6672 if (RVALUE_MARKED(obj) == FALSE)
rb_bug(
"gc_grey: %s is not marked.", obj_info(obj));
6673 if (RVALUE_MARKING(obj) == TRUE)
rb_bug(
"gc_grey: %s is marking/remembered.", obj_info(obj));
6676#if GC_ENABLE_INCREMENTAL_MARK
6677 if (is_incremental_marking(objspace)) {
6678 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
6682 push_mark_stack(&objspace->mark_stack, obj);
6688 struct heap_page *page = GET_HEAP_PAGE(obj);
6690 GC_ASSERT(RVALUE_MARKING(obj) == FALSE);
6691 check_rvalue_consistency(obj);
6693 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
6694 if (!RVALUE_OLD_P(obj)) {
6695 gc_report(3, objspace,
"gc_aging: YOUNG: %s\n", obj_info(obj));
6696 RVALUE_AGE_INC(objspace, obj);
6698 else if (is_full_marking(objspace)) {
6699 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
6700 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
6703 check_rvalue_consistency(obj);
6705 objspace->marked_slots++;
NOINLINE(static void gc_mark_ptr(rb_objspace_t *objspace, VALUE obj));
static void reachable_objects_from_callback(VALUE obj);

    if (LIKELY(during_gc)) {
        rgengc_check_relation(objspace, obj);
        if (!gc_mark_set(objspace, obj)) return;

        if (objspace->rgengc.parent_object) {
            RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
                           (void *)obj, obj_type_name(obj),
                           (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));

            RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));

            rb_bug("try to mark T_NONE object");

        gc_aging(objspace, obj);
        gc_grey(objspace, obj);

        reachable_objects_from_callback(obj);

    GC_ASSERT(is_markable_object(objspace, obj));
    if (UNLIKELY(objspace->flags.during_compacting)) {
        if (LIKELY(during_gc)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);

    if (!is_markable_object(objspace, obj)) return;
    gc_pin(objspace, obj);
    gc_mark_ptr(objspace, obj);

    if (!is_markable_object(objspace, obj)) return;
    gc_mark_ptr(objspace, obj);
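/*
 * [editorial note] Marking entry points, as visible above: gc_mark() marks a
 * markable object; gc_mark_and_pin() additionally sets the pinned bitmap
 * (via gc_pin()) while compaction is running, so the object will not be
 * moved; gc_mark_ptr() itself performs the remembered-set check, sets the
 * mark bit, then does aging and greying.
 */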
6784rb_objspace_marked_object_p(VALUE obj)
6786 return RVALUE_MARKED(obj) ? TRUE : FALSE;
6792 if (RVALUE_OLD_P(obj)) {
6793 objspace->rgengc.parent_object = obj;
6796 objspace->rgengc.parent_object =
Qfalse;
6803 switch (imemo_type(obj)) {
6808 if (LIKELY(env->ep)) {
6810 GC_ASSERT(env->ep[VM_ENV_DATA_INDEX_ENV] == obj);
6811 GC_ASSERT(VM_ENV_ESCAPED_P(env->ep));
6812 gc_mark_values(objspace, (
long)env->env_size, env->env);
6813 VM_ENV_FLAGS_SET(env->ep, VM_ENV_FLAG_WB_REQUIRED);
6814 gc_mark(objspace, (VALUE)rb_vm_env_prev_env(env));
6815 gc_mark(objspace, (VALUE)env->iseq);
6820 gc_mark(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
6821 gc_mark(objspace, (VALUE)RANY(obj)->as.imemo.cref.next);
6822 gc_mark(objspace, RANY(obj)->as.imemo.cref.refinements);
6825 gc_mark(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
6826 gc_mark(objspace, RANY(obj)->as.imemo.svar.lastline);
6827 gc_mark(objspace, RANY(obj)->as.imemo.svar.backref);
6828 gc_mark(objspace, RANY(obj)->as.imemo.svar.others);
6830 case imemo_throw_data:
6831 gc_mark(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
6834 gc_mark_maybe(objspace, (VALUE)RANY(obj)->as.imemo.ifunc.data);
6837 gc_mark(objspace, RANY(obj)->as.imemo.memo.v1);
6838 gc_mark(objspace, RANY(obj)->as.imemo.memo.v2);
6839 gc_mark_maybe(objspace, RANY(obj)->as.imemo.memo.u3.value);
6842 mark_method_entry(objspace, &RANY(obj)->as.imemo.ment);
6852 }
while ((m = m->next) != NULL);
6856 rb_ast_mark(&RANY(obj)->as.imemo.ast);
6858 case imemo_parser_strterm:
6859 rb_strterm_mark(obj);
6861 case imemo_callinfo:
6863 case imemo_callcache:
6867 gc_mark(objspace, (VALUE)vm_cc_cme(cc));
6870 case imemo_constcache:
6873 gc_mark(objspace, ice->value);
6876#if VM_CHECK_MODE > 0
6878 VM_UNREACHABLE(gc_mark_imemo);
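/*
 * gc_mark_children(): per-type reference walker.  After recording the object
 * as the current parent it dispatches on BUILTIN_TYPE and marks every VALUE
 * the object can reach; buffers that live in the transient heap (arrays,
 * ivar tables, structs) are additionally reported via rb_transient_heap_mark()
 * so the transient heap knows they are still in use.
 */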
6886 register RVALUE *any = RANY(obj);
6887 gc_mark_set_parent(objspace, obj);
6890 rb_mark_generic_ivar(obj);
6903 rb_bug("rb_gc_mark() called for broken object");
6911 gc_mark_imemo(objspace, obj);
6918 gc_mark(objspace, any->as.basic.klass);
6926 if (!RCLASS_EXT(obj)) break;
6928 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6929 cc_table_mark(objspace, obj);
6930 mark_tbl_no_pin(objspace, RCLASS_IV_TBL(obj));
6931 mark_const_tbl(objspace, RCLASS_CONST_TBL(obj));
6935 if (RICLASS_OWNS_M_TBL_P(obj)) {
6936 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
6941 if (!RCLASS_EXT(obj))
break;
6942 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
6943 cc_table_mark(objspace, obj);
6948 VALUE root = any->as.array.as.heap.aux.shared_root;
6949 gc_mark(objspace, root);
6954 for (i=0; i < len; i++) {
6955 gc_mark(objspace, ptr[i]);
6958 if (LIKELY(during_gc)) {
6961 rb_transient_heap_mark(obj, ptr);
6968 mark_hash(objspace, obj);
6972 if (STR_SHARED_P(obj)) {
6973 gc_mark(objspace, any->as.string.as.heap.aux.shared);
6982 any->as.typeddata.type->function.dmark :
6984 if (mark_func) (*mark_func)(ptr);
6994 for (i = 0; i < len; i++) {
6995 gc_mark(objspace, ptr[i]);
6998 if (LIKELY(during_gc) &&
6999 ROBJ_TRANSIENT_P(obj)) {
7000 rb_transient_heap_mark(obj, ptr);
7006 if (any->as.file.fptr) {
7007 gc_mark(objspace, any->as.file.fptr->self);
7008 gc_mark(objspace, any->as.file.fptr->pathv);
7018 gc_mark(objspace, any->as.regexp.src);
7022 gc_mark(objspace, any->as.match.regexp);
7023 if (any->as.match.str) {
7024 gc_mark(objspace, any->as.match.str);
7029 gc_mark(objspace, any->as.rational.num);
7030 gc_mark(objspace, any->as.rational.den);
7034 gc_mark(objspace, any->as.complex.real);
7035 gc_mark(objspace, any->as.complex.imag);
7042 const VALUE *const ptr = RSTRUCT_CONST_PTR(obj);
7044 for (i=0; i<len; i++) {
7045 gc_mark(objspace, ptr[i]);
7048 if (LIKELY(during_gc) &&
7049 RSTRUCT_TRANSIENT_P(obj)) {
7050 rb_transient_heap_mark(obj, ptr);
7057 rb_gcdebug_print_obj_condition((VALUE)obj);
7062 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
7064 is_pointer_to_heap(objspace, any) ? "corrupted object" : "non object");
7073gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
7077#if GC_ENABLE_INCREMENTAL_MARK
7078 size_t marked_slots_at_the_beginning = objspace->marked_slots;
7079 size_t popped_count = 0;
7082 while (pop_mark_stack(mstack, &obj)) {
7083 if (obj == Qundef) continue;
7085 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(obj)) {
7086 rb_bug("gc_mark_stacked_objects: %s is not marked.", obj_info(obj));
7088 gc_mark_children(objspace, obj);
7090#if GC_ENABLE_INCREMENTAL_MARK
7092 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(obj)) {
7093 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
7095 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
7098 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
7108 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
7110 if (is_mark_stack_empty(mstack)) {
7111 shrink_stack_chunk_cache(mstack);
7120gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
7122 return gc_mark_stacked_objects(objspace, TRUE, count);
7128 return gc_mark_stacked_objects(objspace, FALSE, 0);
7132#define MAX_TICKS 0x100
7133static tick_t mark_ticks[MAX_TICKS];
7134static const char *mark_ticks_categories[MAX_TICKS];
7137show_mark_ticks(void)
7140 fprintf(stderr, "mark ticks result:\n");
7141 for (i=0; i<MAX_TICKS; i++) {
7142 const char *category = mark_ticks_categories[i];
7144 fprintf(stderr, "%s\t%8lu\n", category, (unsigned long)mark_ticks[i]);
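/*
 * gc_mark_roots(): mark everything reachable from the VM roots -- the VM
 * object itself, the finalizer table, the machine stack/registers, the
 * global variable list, end procs, rb_gc_mark_global_tbl() and the
 * object-id table.  MARK_CHECKPOINT records which category is currently
 * being marked (for tick profiling and for *categoryp in crash reports).
 */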
7155gc_mark_roots(rb_objspace_t *objspace, const char **categoryp)
7159 rb_vm_t *vm = rb_ec_vm_ptr(ec);
7162 tick_t start_tick = tick();
7164 const char *prev_category = 0;
7166 if (mark_ticks_categories[0] == 0) {
7167 atexit(show_mark_ticks);
7171 if (categoryp) *categoryp = "xxx";
7173 objspace->rgengc.parent_object = Qfalse;
7176#define MARK_CHECKPOINT_PRINT_TICK(category) do { \
7177 if (prev_category) { \
7178 tick_t t = tick(); \
7179 mark_ticks[tick_count] = t - start_tick; \
7180 mark_ticks_categories[tick_count] = prev_category; \
7183 prev_category = category; \
7184 start_tick = tick(); \
7187#define MARK_CHECKPOINT_PRINT_TICK(category)
7190#define MARK_CHECKPOINT(category) do { \
7191 if (categoryp) *categoryp = category; \
7192 MARK_CHECKPOINT_PRINT_TICK(category); \
7195 MARK_CHECKPOINT("vm");
7198 if (vm->self) gc_mark(objspace, vm->self);
7200 MARK_CHECKPOINT("finalizers");
7201 mark_finalizer_tbl(objspace, finalizer_table);
7203 MARK_CHECKPOINT("machine_context");
7204 mark_current_machine_context(objspace, ec);
7207 MARK_CHECKPOINT("global_list");
7208 for (list = global_list; list; list = list->next) {
7209 gc_mark_maybe(objspace, *list->varptr);
7212 MARK_CHECKPOINT("end_proc");
7215 MARK_CHECKPOINT("global_tbl");
7216 rb_gc_mark_global_tbl();
7218 MARK_CHECKPOINT("object_id");
7220 mark_tbl_no_pin(objspace, objspace->obj_to_id_tbl);
7222 if (stress_to_class) rb_gc_mark(stress_to_class);
7224 MARK_CHECKPOINT("finish");
7225#undef MARK_CHECKPOINT
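/*
 * RGENGC_CHECK_MODE >= 4 debugging aid: objspace_allrefs() re-walks every
 * root and every reachable object with GC disabled, building an st_table
 * that maps each object to a reflist of the roots/objects that refer to it.
 * gc_marks_check() then runs a checker over that table and dumps the
 * recorded referrers for any object that violates marking invariants.
 */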
7228#if RGENGC_CHECK_MODE >= 4
7230#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
7231#define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
7232#define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
7240static struct reflist *
7241reflist_create(VALUE obj)
7243 struct reflist *refs = xmalloc(sizeof(struct reflist));
7245 refs->list = ALLOC_N(VALUE, refs->size);
7246 refs->list[0] = obj;
7252reflist_destruct(struct reflist *refs)
7259reflist_add(struct reflist *refs, VALUE obj)
7261 if (refs->pos == refs->size) {
7263 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
7266 refs->list[refs->pos++] = obj;
7270reflist_dump(struct reflist *refs)
7273 for (i=0; i<refs->pos; i++) {
7274 VALUE obj = refs->list[i];
7275 if (IS_ROOTSIG(obj)) {
7276 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
7279 fprintf(stderr, "<%s>", obj_info(obj));
7281 if (i+1 < refs->pos) fprintf(stderr, ", ");
7286reflist_referred_from_machine_context(struct reflist *refs)
7289 for (i=0; i<refs->pos; i++) {
7290 VALUE obj = refs->list[i];
7291 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
7306 const char *category;
7312allrefs_add(struct allrefs *data, VALUE obj)
7314 struct reflist *refs;
7317 if (st_lookup(data->references, obj, &r)) {
7318 refs = (struct reflist *)r;
7319 reflist_add(refs, data->root_obj);
7323 refs = reflist_create(data->root_obj);
7324 st_insert(data->references, obj, (st_data_t)refs);
7330allrefs_i(VALUE obj, void *ptr)
7332 struct allrefs *data = (struct allrefs *)ptr;
7334 if (allrefs_add(data, obj)) {
7335 push_mark_stack(&data->mark_stack, obj);
7340allrefs_roots_i(VALUE obj, void *ptr)
7342 struct allrefs *data = (struct allrefs *)ptr;
7343 if (strlen(data->category) == 0) rb_bug("!!!");
7344 data->root_obj = MAKE_ROOTSIG(data->category);
7346 if (allrefs_add(data, obj)) {
7347 push_mark_stack(&data->mark_stack, obj);
7350#define PUSH_MARK_FUNC_DATA(v) do { \
7351 struct gc_mark_func_data_struct *prev_mark_func_data = GET_RACTOR()->mfd; \
7352 GET_RACTOR()->mfd = (v);
7354#define POP_MARK_FUNC_DATA() GET_RACTOR()->mfd = prev_mark_func_data;} while (0)
7359 struct allrefs data;
7360 struct gc_mark_func_data_struct mfd;
7362 int prev_dont_gc = dont_gc_val();
7365 data.objspace = objspace;
7366 data.references = st_init_numtable();
7367 init_mark_stack(&data.mark_stack);
7369 mfd.mark_func = allrefs_roots_i;
7373 PUSH_MARK_FUNC_DATA(&mfd);
7374 GET_RACTOR()->mfd = &mfd;
7375 gc_mark_roots(objspace, &data.category);
7376 POP_MARK_FUNC_DATA();
7379 while (pop_mark_stack(&data.mark_stack, &obj)) {
7380 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
7382 free_stack_chunks(&data.mark_stack);
7384 dont_gc_set(prev_dont_gc);
7385 return data.references;
7389objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
7391 struct reflist *refs = (
struct reflist *)value;
7392 reflist_destruct(refs);
7397objspace_allrefs_destruct(
struct st_table *refs)
7399 st_foreach(refs, objspace_allrefs_destruct_i, 0);
7400 st_free_table(refs);
7403#if RGENGC_CHECK_MODE >= 5
7405allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
7407 VALUE obj = (VALUE)k;
7408 struct reflist *refs = (struct reflist *)v;
7409 fprintf(stderr, "[allrefs_dump_i] %s <- ", obj_info(obj));
7411 fprintf(stderr, "\n");
7418 VALUE size = objspace->rgengc.allrefs_table->num_entries;
7419 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
7420 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
7425gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
7428 struct reflist *refs = (struct reflist *)v;
7432 if (!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj)) {
7433 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", obj_info(obj));
7434 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
7437 if (reflist_referred_from_machine_context(refs)) {
7438 fprintf(stderr, " (marked from machine stack).\n");
7442 objspace->rgengc.error_count++;
7443 fprintf(stderr, "\n");
7450gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
7452 size_t saved_malloc_increase = objspace->malloc_params.increase;
7453#if RGENGC_ESTIMATE_OLDMALLOC
7454 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
7456 VALUE already_disabled = rb_objspace_gc_disable(objspace);
7458 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
7461 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
7464 if (objspace->rgengc.error_count > 0) {
7465#if RGENGC_CHECK_MODE >= 5
7466 allrefs_dump(objspace);
7468 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
7471 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
7472 objspace->rgengc.allrefs_table = 0;
7474 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
7475 objspace->malloc_params.increase = saved_malloc_increase;
7476#if RGENGC_ESTIMATE_OLDMALLOC
7477 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
7485 size_t live_object_count;
7486 size_t zombie_object_count;
7489 size_t old_object_count;
7490 size_t remembered_shady_count;
7494check_generation_i(const VALUE child, void *ptr)
7497 const VALUE parent = data->parent;
7499 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(parent));
7501 if (!RVALUE_OLD_P(child)) {
7502 if (!RVALUE_REMEMBERED(parent) &&
7503 !RVALUE_REMEMBERED(child) &&
7504 !RVALUE_UNCOLLECTIBLE(child)) {
7505 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", obj_info(parent), obj_info(child));
7512check_color_i(const VALUE child, void *ptr)
7515 const VALUE parent = data->parent;
7517 if (!RVALUE_WB_UNPROTECTED(parent) && RVALUE_WHITE_P(child)) {
7518 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
7519 obj_info(parent), obj_info(child));
7525check_children_i(const VALUE child, void *ptr)
7528 if (check_rvalue_consistency_force(child, FALSE) != 0) {
7529 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
7530 obj_info(child), obj_info(data->parent));
7531 rb_print_backtrace();
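/*
 * verify_internal_consistency_i(): page-range walker used by
 * gc_verify_internal_consistency().  It counts live, zombie, old and
 * remembered-shady objects and, through the check_*_i callbacks above,
 * verifies the write-barrier invariants: old parents must remember
 * unremembered young children, and black objects must not point at white
 * ones while incremental marking is in progress.
 */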
7538verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
7544 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
7545 void *poisoned = asan_poisoned_object_p(obj);
7546 asan_unpoison_object(obj, false);
7548 if (is_live_object(objspace, obj)) {
7550 data->live_object_count++;
7555 if (!gc_object_moved_p(objspace, obj)) {
7557 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
7561 if (RVALUE_OLD_P(obj)) data->old_object_count++;
7562 if (RVALUE_WB_UNPROTECTED(obj) && RVALUE_UNCOLLECTIBLE(obj)) data->remembered_shady_count++;
7564 if (!is_marking(objspace) && RVALUE_OLD_P(obj)) {
7567 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
7570 if (is_incremental_marking(objspace)) {
7571 if (RVALUE_BLACK_P(obj)) {
7574 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
7581 data->zombie_object_count++;
7586 asan_poison_object(obj);
7597 unsigned int has_remembered_shady = FALSE;
7598 unsigned int has_remembered_old = FALSE;
7599 int remembered_old_objects = 0;
7600 int free_objects = 0;
7601 int zombie_objects = 0;
7602 int stride = page->slot_size / sizeof(RVALUE);
7604 for (i=0; i<page->total_slots; i+=stride) {
7605 VALUE val = (VALUE)&page->start[i];
7606 void *poisoned = asan_poisoned_object_p(val);
7607 asan_unpoison_object(val, false);
7609 if (RBASIC(val) == 0) free_objects++;
7611 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
7612 has_remembered_shady = TRUE;
7614 if (RVALUE_PAGE_MARKING(page, val)) {
7615 has_remembered_old = TRUE;
7616 remembered_old_objects++;
7621 asan_poison_object(val);
7625 if (!is_incremental_marking(objspace) &&
7626 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
7628 for (i=0; i<page->total_slots; i++) {
7629 VALUE val = (VALUE)&page->start[i];
7630 if (RVALUE_PAGE_MARKING(page, val)) {
7631 fprintf(stderr, "marking -> %s\n", obj_info(val));
7634 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
7635 (void *)page, remembered_old_objects, obj ? obj_info(obj) : "");
7638 if (page->flags.has_uncollectible_shady_objects == FALSE && has_remembered_shady == TRUE) {
7639 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
7640 (void *)page, obj ? obj_info(obj) : "");
7645 if (page->free_slots != free_objects) {
7646 rb_bug("page %p's free_slots should be %d, but %d\n", (void *)page, page->free_slots, free_objects);
7649 if (page->final_slots != zombie_objects) {
7650 rb_bug("page %p's final_slots should be %d, but %d\n", (void *)page, page->final_slots, zombie_objects);
7653 return remembered_old_objects;
7659 int remembered_old_objects = 0;
7662 list_for_each(head, page, page_node) {
7663 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
7664 RVALUE *p = page->freelist;
7666 VALUE vp = (VALUE)p;
7668 asan_unpoison_object(vp, false);
7670 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", obj_info(vp));
7672 p = p->as.free.next;
7673 asan_poison_object(prev);
7675 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
7677 if (page->flags.has_remembered_objects == FALSE) {
7678 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
7682 return remembered_old_objects;
7688 int remembered_old_objects = 0;
7689 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
7690 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_EDEN_HEAP(&size_pools[i])->pages));
7691 remembered_old_objects += gc_verify_heap_pages_(objspace, &(SIZE_POOL_TOMB_HEAP(&size_pools[i])->pages));
7693 return remembered_old_objects;
7707gc_verify_internal_consistency_m(VALUE dummy)
7718 data.objspace = objspace;
7719 gc_report(5, objspace,
"gc_verify_internal_consistency: start\n");
7722 for (size_t i = 0; i < heap_allocated_pages; i++) {
7723 struct heap_page *page = heap_pages_sorted[i];
7724 short slot_size = page->slot_size;
7726 uintptr_t start = (uintptr_t)page->start;
7727 uintptr_t end = start + page->total_slots * slot_size;
7729 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
7732 if (data.err_count != 0) {
7733#if RGENGC_CHECK_MODE >= 5
7734 objspace->rgengc.error_count = data.err_count;
7735 gc_marks_check(objspace, NULL, NULL);
7736 allrefs_dump(objspace);
7738 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
7742 gc_verify_heap_pages(objspace);
7746 if (!is_lazy_sweeping(objspace) &&
7748 ruby_single_main_ractor != NULL) {
7749 if (objspace_live_slots(objspace) != data.live_object_count) {
7750 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", "
7751 "objspace->profile.total_freed_objects: %"PRIdSIZE"\n",
7752 heap_pages_final_slots, objspace->profile.total_freed_objects);
7753 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7754 objspace_live_slots(objspace), data.live_object_count);
7758 if (!is_marking(objspace)) {
7759 if (objspace->rgengc.old_objects != data.old_object_count) {
7760 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
7761 objspace->rgengc.old_objects, data.old_object_count);
7763 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
7764 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
7765 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
7770 size_t list_count = 0;
7773 VALUE z = heap_pages_deferred_final;
7776 z = RZOMBIE(z)->next;
7780 if (heap_pages_final_slots != data.zombie_object_count ||
7781 heap_pages_final_slots != list_count) {
7783 rb_bug("inconsistent finalizing object count:\n"
7784 " expect %"PRIuSIZE"\n"
7785 " but %"PRIuSIZE" zombies\n"
7786 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
7787 heap_pages_final_slots,
7788 data.zombie_object_count,
7793 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
7803 unsigned int prev_during_gc = during_gc;
7806 gc_verify_internal_consistency_(objspace);
7808 during_gc = prev_during_gc;
7814rb_gc_verify_internal_consistency(
void)
7820gc_verify_transient_heap_internal_consistency(VALUE dmy)
7822 rb_transient_heap_verify();
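/*
 * gc_marks_start(): enter marking mode.  A full mark clears every mark /
 * remember bitmap and resets the old-object counters before walking the
 * roots; a minor mark keeps them and instead re-marks children of the
 * remembered set (rgengc_rememberset_mark) so only young objects need
 * tracing.
 */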
7832 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
7833 gc_mode_transition(objspace, gc_mode_marking);
7836#if GC_ENABLE_INCREMENTAL_MARK
7837 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / ((objspace->rincgc.pooled_slots / HEAP_PAGE_OBJ_LIMIT) + 1);
7839 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
7840 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
7841 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
7842 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
7844 objspace->flags.during_minor_gc = FALSE;
7845 if (ruby_enable_autocompact) {
7846 objspace->flags.during_compacting |= TRUE;
7848 objspace->profile.major_gc_count++;
7849 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
7850 objspace->rgengc.old_objects = 0;
7851 objspace->rgengc.last_major_gc = objspace->profile.count;
7852 objspace->marked_slots = 0;
7854 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
7855 rgengc_mark_and_rememberset_clear(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7859 objspace->flags.during_minor_gc = TRUE;
7860 objspace->marked_slots =
7861 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
7862 objspace->profile.minor_gc_count++;
7864 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
7865 rgengc_rememberset_mark(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7869 gc_mark_roots(objspace, NULL);
7871 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
7872 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
7875#if GC_ENABLE_INCREMENTAL_MARK
7877gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
7882 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", obj_info((VALUE)p));
7883 GC_ASSERT(RVALUE_WB_UNPROTECTED((VALUE)p));
7884 GC_ASSERT(RVALUE_MARKED((VALUE)p));
7885 gc_mark_children(objspace, (VALUE)p);
7898 list_for_each(&heap->pages, page, page_node) {
7899 bits_t *mark_bits = page->mark_bits;
7900 bits_t *wbun_bits = page->wb_unprotected_bits;
7904 bits_t bits = mark_bits[0] & wbun_bits[0];
7905 bits >>= NUM_IN_PAGE(p);
7906 gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7907 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
7909 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
7910 bits_t bits = mark_bits[j] & wbun_bits[j];
7912 gc_marks_wb_unprotected_objects_plane(objspace, (uintptr_t)p, bits);
7913 p += BITS_BITLENGTH;
7917 gc_mark_stacked_objects_all(objspace);
7921heap_move_pooled_pages_to_free_pages(
rb_heap_t *heap)
7923 struct heap_page *page = heap->pooled_pages;
7926 heap->pooled_pages = page->free_next;
7927 heap_add_freepage(heap, page);
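/*
 * gc_marks_finish(): wrap up marking.  Under incremental marking it returns
 * FALSE (retry) while pooled pages remain or a final root re-scan pushes new
 * objects; otherwise it marks the children of marked-but-WB-unprotected
 * objects, recomputes the free-slot targets (scaled by the ractor count) and
 * heap growth, and sets need_major_gc flags when shady or old object counts
 * exceed their limits.
 */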
7937#if GC_ENABLE_INCREMENTAL_MARK
7939 if (is_incremental_marking(objspace)) {
7940 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
7941 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(&size_pools[i]);
7942 if (heap->pooled_pages) {
7943 heap_move_pooled_pages_to_free_pages(heap);
7944 gc_report(1, objspace, "gc_marks_finish: pooled pages are exists. retry.\n");
7949 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
7950 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
7951 mark_stack_size(&objspace->mark_stack));
7954 gc_mark_roots(objspace, 0);
7956 if (is_mark_stack_empty(&objspace->mark_stack) == FALSE) {
7957 gc_report(1, objspace, "gc_marks_finish: not empty (%"PRIdSIZE"). retry.\n",
7958 mark_stack_size(&objspace->mark_stack));
7962#if RGENGC_CHECK_MODE >= 2
7963 if (gc_verify_heap_pages(objspace) != 0) {
7964 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
7968 objspace->flags.during_incremental_marking = FALSE;
7970 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
7971 gc_marks_wb_unprotected_objects(objspace, SIZE_POOL_EDEN_HEAP(&size_pools[i]));
7976#if RGENGC_CHECK_MODE >= 2
7977 gc_verify_internal_consistency(objspace);
7980 if (is_full_marking(objspace)) {
7982 const double r = gc_params.oldobject_limit_factor;
7983 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
7984 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
7987#if RGENGC_CHECK_MODE >= 4
7989 gc_marks_check(objspace, gc_check_after_marks_i,
"after_marks");
7995 size_t total_slots = heap_allocatable_slots(objspace) + heap_eden_total_slots(objspace);
7996 size_t sweep_slots = total_slots - objspace->marked_slots;
7997 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
7998 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
7999 int full_marking = is_full_marking(objspace);
8000 const int r_cnt = GET_VM()->ractor.cnt;
8001 const int r_mul = r_cnt > 8 ? 8 : r_cnt;
8003 GC_ASSERT(heap_eden_total_slots(objspace) >= objspace->marked_slots);
8006 if (max_free_slots < gc_params.heap_init_slots * r_mul) {
8007 max_free_slots = gc_params.heap_init_slots * r_mul;
8010 if (sweep_slots > max_free_slots) {
8011 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
8014 heap_pages_freeable_pages = 0;
8018 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
8019 min_free_slots = gc_params.heap_free_slots * r_mul;
8022 if (sweep_slots < min_free_slots) {
8023 if (!full_marking) {
8024 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
8025 full_marking = TRUE;
8030 gc_report(1, objspace,
"gc_marks_finish: next is full GC!!)\n");
8031 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_NOFREE;
8038 gc_report(1, objspace,
"gc_marks_finish: heap_set_increment!!\n");
8040 size_pool_allocatable_pages_set(objspace, size_pool, heap_extend_pages(objspace, sweep_slots, total_slots, heap_allocated_pages + heap_allocatable_pages(objspace)));
8042 heap_increment(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8049 const double r = gc_params.oldobject_limit_factor;
8050 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r);
8051 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
8054 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
8055 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_SHADY;
8057 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
8058 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDGEN;
8060 if (RGENGC_FORCE_MAJOR_GC) {
8061 objspace->rgengc.need_major_gc = GPR_FLAG_MAJOR_BY_FORCE;
8064 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
8065 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
8066 "sweep %"PRIdSIZE" slots, increment: %"PRIdSIZE", next GC: %s)\n",
8067 objspace->marked_slots, objspace->rgengc.old_objects, heap_eden_total_slots(objspace), sweep_slots, heap_allocatable_pages(objspace),
8068 objspace->rgengc.need_major_gc ? "major" : "minor");
8071 rb_transient_heap_finish_marking();
8072 rb_ractor_finish_marking();
8079#if GC_ENABLE_INCREMENTAL_MARK
8083 GC_ASSERT(is_marking(objspace));
8085 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
8086 if (gc_marks_finish(objspace)) {
8091 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE"\n", objspace->marked_slots);
8098 gc_report(1, objspace, "gc_marks_rest\n");
8100#if GC_ENABLE_INCREMENTAL_MARK
8101 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8102 SIZE_POOL_EDEN_HEAP(&size_pools[i])->pooled_pages = NULL;
8106 if (is_incremental_marking(objspace)) {
8108 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
8109 }
while (gc_marks_finish(objspace) == FALSE);
8112 gc_mark_stacked_objects_all(objspace);
8113 gc_marks_finish(objspace);
8123 GC_ASSERT(dont_gc_val() == FALSE);
8124#if GC_ENABLE_INCREMENTAL_MARK
8126 unsigned int lock_lev;
8127 gc_enter(objspace, gc_enter_event_mark_continue, &lock_lev);
8132 if (heap->pooled_pages) {
8133 while (heap->pooled_pages && slots < HEAP_PAGE_OBJ_LIMIT) {
8134 struct heap_page *page = heap_move_pooled_pages_to_free_pages(heap);
8135 slots += page->free_slots;
8137 from = "pooled-pages";
8139 else if (heap_increment(objspace, size_pool, heap)) {
8140 slots = heap->free_pages->free_slots;
8141 from = "incremented-pages";
8145 gc_report(2, objspace, "gc_marks_continue: provide %d slots from %s.\n",
8147 gc_marks_step(objspace, objspace->rincgc.step_slots);
8150 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
8151 mark_stack_size(&objspace->mark_stack));
8152 gc_marks_rest(objspace);
8155 gc_exit(objspace, gc_enter_event_mark_continue, &lock_lev);
8162 gc_prof_mark_timer_start(objspace);
8166 gc_marks_start(objspace, full_mark);
8167 if (!is_incremental_marking(objspace)) {
8168 gc_marks_rest(objspace);
8171#if RGENGC_PROFILE > 0
8172 if (gc_prof_record(objspace)) {
8174 record->old_objects = objspace->rgengc.old_objects;
8177 gc_prof_mark_timer_stop(objspace);
8183gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
8185 if (level <= RGENGC_DEBUG) {
8189 const char *status = " ";
8192 status = is_full_marking(objspace) ? "+" : "-";
8195 if (is_lazy_sweeping(objspace)) {
8198 if (is_incremental_marking(objspace)) {
8203 va_start(args, fmt);
8204 vsnprintf(buf, 1024, fmt, args);
8207 fprintf(out,
"%s|", status);
8215rgengc_remembersetbits_get(
rb_objspace_t *objspace, VALUE obj)
8217 return RVALUE_REMEMBERED(obj);
8221rgengc_remembersetbits_set(
rb_objspace_t *objspace, VALUE obj)
8223 struct heap_page *page = GET_HEAP_PAGE(obj);
8224 bits_t *bits = &page->marking_bits[0];
8226 GC_ASSERT(!is_incremental_marking(objspace));
8228 if (MARKED_IN_BITMAP(bits, obj)) {
8232 page->flags.has_remembered_objects = TRUE;
8233 MARK_IN_BITMAP(bits, obj);
8244 gc_report(6, objspace, "rgengc_remember: %s %s\n", obj_info(obj),
8245 rgengc_remembersetbits_get(objspace, obj) ? "was already remembered" : "is remembered now");
8247 check_rvalue_consistency(obj);
8249 if (RGENGC_CHECK_MODE) {
8250 if (RVALUE_WB_UNPROTECTED(obj)) rb_bug("rgengc_remember: %s is not wb protected.", obj_info(obj));
8253#if RGENGC_PROFILE > 0
8254 if (!rgengc_remembered(objspace, obj)) {
8255 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8256 objspace->profile.total_remembered_normal_object_count++;
8257#if RGENGC_PROFILE >= 2
8258 objspace->profile.remembered_normal_object_count_types[
BUILTIN_TYPE(obj)]++;
8264 return rgengc_remembersetbits_set(objspace, obj);
8270 int result = rgengc_remembersetbits_get(objspace, obj);
8271 check_rvalue_consistency(obj);
8278 gc_report(6, objspace,
"rgengc_remembered: %s\n", obj_info(obj));
8279 return rgengc_remembered_sweep(objspace, obj);
8282#ifndef PROFILE_REMEMBERSET_MARK
8283#define PROFILE_REMEMBERSET_MARK 0
8287rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
8292 VALUE obj = (VALUE)p;
8293 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", obj_info(obj));
8294 GC_ASSERT(RVALUE_UNCOLLECTIBLE(obj));
8295 GC_ASSERT(RVALUE_OLD_P(obj) || RVALUE_WB_UNPROTECTED(obj));
8297 gc_mark_children(objspace, obj);
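/*
 * rgengc_rememberset_mark(): minor-GC root scan over the remembered set.
 * For every page flagged as holding remembered or uncollectible shady
 * objects, a bitmask of (marking | (uncollectible & wb_unprotected)) bits is
 * built and each set slot has its children marked; the marking bits are
 * cleared so the remembered set is rebuilt afresh by the write barrier.
 */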
8310#if PROFILE_REMEMBERSET_MARK
8311 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
8313 gc_report(1, objspace,
"rgengc_rememberset_mark: start\n");
8315 list_for_each(&heap->pages, page, page_node) {
8316 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_shady_objects) {
8318 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
8319 bits_t *marking_bits = page->marking_bits;
8320 bits_t *uncollectible_bits = page->uncollectible_bits;
8321 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
8322#if PROFILE_REMEMBERSET_MARK
8323 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_shady_objects) has_both++;
8324 else if (page->flags.has_remembered_objects) has_old++;
8325 else if (page->flags.has_uncollectible_shady_objects) has_shady++;
8327 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
8328 bits[j] = marking_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
8329 marking_bits[j] = 0;
8331 page->flags.has_remembered_objects = FALSE;
8334 bitset >>= NUM_IN_PAGE(p);
8335 rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8336 p += (BITS_BITLENGTH - NUM_IN_PAGE(p));
8338 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
8340 rgengc_rememberset_mark_plane(objspace, (uintptr_t)p, bitset);
8341 p += BITS_BITLENGTH;
8344#if PROFILE_REMEMBERSET_MARK
8351#if PROFILE_REMEMBERSET_MARK
8352 fprintf(stderr,
"%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
8354 gc_report(1, objspace,
"rgengc_rememberset_mark: finished\n");
8362 list_for_each(&heap->pages, page, page_node) {
8363 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8364 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8365 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8366 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
8367 page->flags.has_uncollectible_shady_objects = FALSE;
8368 page->flags.has_remembered_objects = FALSE;
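/*
 * Write barrier entry points.  C extensions normally reach these through
 * RB_OBJ_WRITE()/rb_gc_writebarrier(), e.g. (illustrative only):
 *
 *     RB_OBJ_WRITE(parent, &parent_struct->field, child);
 *
 * Outside of incremental marking, an old object `a` gaining a reference to a
 * young object `b` is handled generationally below: broadly, either `a` is
 * added to the remembered set, or `b` is marked and promoted/remembered
 * (remembering unprotected objects separately).  During incremental marking
 * gc_writebarrier_incremental() handles black->white edges instead.
 */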
8374NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
8377gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
8379 if (RGENGC_CHECK_MODE) {
8380 if (!RVALUE_OLD_P(a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", obj_info(a));
8381 if ( RVALUE_OLD_P(b)) rb_bug("gc_writebarrier_generational: %s is an old object.", obj_info(b));
8382 if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", obj_info(a), obj_info(b));
8387 if (!rgengc_remembered(objspace, a)) {
8388 RB_VM_LOCK_ENTER_NO_BARRIER();
8390 rgengc_remember(objspace, a);
8392 RB_VM_LOCK_LEAVE_NO_BARRIER();
8393 gc_report(1, objspace,
"gc_writebarrier_generational: %s (remembered) -> %s\n", obj_info(a), obj_info(b));
8397 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(b), b);
8398 if (RVALUE_WB_UNPROTECTED(b)) {
8399 gc_remember_unprotected(objspace, b);
8402 RVALUE_AGE_SET_OLD(objspace, b);
8403 rgengc_remember(objspace, b);
8406 gc_report(1, objspace,
"gc_writebarrier_generational: %s -> %s (remembered)\n", obj_info(a), obj_info(b));
8409 check_rvalue_consistency(a);
8410 check_rvalue_consistency(b);
8413#if GC_ENABLE_INCREMENTAL_MARK
8415gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
8417 gc_mark_set_parent(objspace, parent);
8418 rgengc_check_relation(objspace, obj);
8419 if (gc_mark_set(objspace, obj) == FALSE) return;
8420 gc_aging(objspace, obj);
8421 gc_grey(objspace, obj);
8424NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
8427gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
8429 gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, obj_info(b));
8431 if (RVALUE_BLACK_P(a)) {
8432 if (RVALUE_WHITE_P(b)) {
8433 if (!RVALUE_WB_UNPROTECTED(a)) {
8434 gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, obj_info(b));
8435 gc_mark_from(objspace, b, a);
8438 else if (RVALUE_OLD_P(a) && !RVALUE_OLD_P(b)) {
8439 if (!RVALUE_WB_UNPROTECTED(b)) {
8440 gc_report(1, objspace, "gc_writebarrier_incremental: [GN] %p -> %s\n", (void *)a, obj_info(b));
8441 RVALUE_AGE_SET_OLD(objspace, b);
8443 if (RVALUE_BLACK_P(b)) {
8444 gc_grey(objspace, b);
8448 gc_report(1, objspace, "gc_writebarrier_incremental: [LL] %p -> %s\n", (void *)a, obj_info(b));
8449 gc_remember_unprotected(objspace, b);
8453 if (UNLIKELY(objspace->flags.during_compacting)) {
8454 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
8459#define gc_writebarrier_incremental(a, b, objspace)
8471 if (!is_incremental_marking(objspace)) {
8472 if (!RVALUE_OLD_P(a) || RVALUE_OLD_P(b)) {
8476 gc_writebarrier_generational(a, b, objspace);
8482 RB_VM_LOCK_ENTER_NO_BARRIER();
8484 if (is_incremental_marking(objspace)) {
8485 gc_writebarrier_incremental(a, b, objspace);
8491 RB_VM_LOCK_LEAVE_NO_BARRIER();
8493 if (retry)
goto retry;
8501 if (RVALUE_WB_UNPROTECTED(obj)) {
8507 gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", obj_info(obj),
8508 rgengc_remembered(objspace, obj) ? " (already remembered)" : "");
8510 if (RVALUE_OLD_P(obj)) {
8511 gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", obj_info(obj));
8512 RVALUE_DEMOTE(objspace, obj);
8513 gc_mark_set(objspace, obj);
8514 gc_remember_unprotected(objspace, obj);
8517 objspace->profile.total_shade_operation_count++;
8518#if RGENGC_PROFILE >= 2
8519 objspace->profile.shade_operation_count_types[
BUILTIN_TYPE(obj)]++;
8524 RVALUE_AGE_RESET(obj);
8527 RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
8528 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
8535MJIT_FUNC_EXPORTED void
8536rb_gc_writebarrier_remember(VALUE obj)
8540 gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", obj_info(obj));
8542 if (is_incremental_marking(objspace)) {
8543 if (RVALUE_BLACK_P(obj)) {
8544 gc_grey(objspace, obj);
8548 if (RVALUE_OLD_P(obj)) {
8549 rgengc_remember(objspace, obj);
8554static st_table *rgengc_unprotect_logging_table;
8557rgengc_unprotect_logging_exit_func_i(st_data_t key, st_data_t val, st_data_t arg)
8559 fprintf(stderr, "%s\t%"PRIuVALUE"\n", (char *)key, (VALUE)val);
8564rgengc_unprotect_logging_exit_func(void)
8566 st_foreach(rgengc_unprotect_logging_table, rgengc_unprotect_logging_exit_func_i, 0);
8570rb_gc_unprotect_logging(void *objptr, const char *filename, int line)
8572 VALUE obj = (VALUE)objptr;
8574 if (rgengc_unprotect_logging_table == 0) {
8575 rgengc_unprotect_logging_table = st_init_strtable();
8576 atexit(rgengc_unprotect_logging_exit_func);
8579 if (RVALUE_WB_UNPROTECTED(obj) == 0) {
8584 snprintf(ptr, 0x100 - 1,
"%s|%s:%d", obj_info(obj), filename, line);
8586 if (st_lookup(rgengc_unprotect_logging_table, (st_data_t)ptr, &cnt)) {
8593 st_insert(rgengc_unprotect_logging_table, (st_data_t)ptr, cnt);
8598rb_copy_wb_protected_attribute(VALUE dest, VALUE obj)
8602 if (RVALUE_WB_UNPROTECTED(obj) && !RVALUE_WB_UNPROTECTED(dest)) {
8603 if (!RVALUE_OLD_P(dest)) {
8604 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
8605 RVALUE_AGE_RESET_RAW(dest);
8608 RVALUE_DEMOTE(objspace, dest);
8612 check_rvalue_consistency(dest);
8618rb_obj_rgengc_writebarrier_protected_p(VALUE obj)
8620 return RVALUE_WB_UNPROTECTED(obj) ?
Qfalse :
Qtrue;
8624rb_obj_rgengc_promoted_p(VALUE obj)
8630rb_obj_gc_flags(VALUE obj, ID* flags,
size_t max)
8633 static ID ID_marked;
8634 static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
8637#define I(s) ID_##s = rb_intern(#s);
8647 if (RVALUE_WB_UNPROTECTED(obj) == 0 && n<max) flags[n++] = ID_wb_protected;
8648 if (RVALUE_OLD_P(obj) && n<max) flags[n++] = ID_old;
8649 if (RVALUE_UNCOLLECTIBLE(obj) && n<max) flags[n++] = ID_uncollectible;
8650 if (MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj) && n<max) flags[n++] = ID_marking;
8651 if (MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) && n<max) flags[n++] = ID_marked;
8652 if (MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) && n<max) flags[n++] = ID_pinned;
8661 for (size_t size_pool_idx = 0; size_pool_idx < SIZE_POOL_COUNT; size_pool_idx++) {
8664 struct heap_page *page = cache->using_page;
8665 RVALUE *freelist = cache->freelist;
8666 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
8668 heap_page_freelist_append(page, freelist);
8670 cache->using_page = NULL;
8671 cache->freelist = NULL;
8681#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
8682#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
8688 if (!is_pointer_to_heap(&
rb_objspace, (
void *)obj))
8693 VALUE ary_ary = GET_VM()->mark_object_ary;
8694 VALUE ary = rb_ary_last(0, 0, ary_ary);
8713 tmp->next = global_list;
8722 struct gc_list *tmp = global_list;
8724 if (tmp->varptr == addr) {
8725 global_list = tmp->next;
8730 if (tmp->next->varptr == addr) {
8731 struct gc_list *t = tmp->next;
8733 tmp->next = tmp->next->next;
8751 gc_stress_no_immediate_sweep,
8752 gc_stress_full_mark_after_malloc,
8756#define gc_stress_full_mark_after_malloc_p() \
8757 (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
8762 if (!heap->free_pages) {
8763 if (!heap_increment(objspace, size_pool, heap)) {
8764 size_pool_allocatable_pages_set(objspace, size_pool, 1);
8765 heap_increment(objspace, size_pool, heap);
8773 if (dont_gc_val() || during_gc || ruby_disable_gc) {
8774 for (
int i = 0; i < SIZE_POOL_COUNT; i++) {
8776 heap_ready_to_gc(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool));
8786gc_reset_malloc_info(
rb_objspace_t *objspace,
bool full_mark)
8788 gc_prof_set_malloc_info(objspace);
8790 size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
8791 size_t old_limit = malloc_limit;
8793 if (inc > malloc_limit) {
8794 malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
8795 if (malloc_limit > gc_params.malloc_limit_max) {
8796 malloc_limit = gc_params.malloc_limit_max;
8800 malloc_limit = (size_t)(malloc_limit * 0.98);
8801 if (malloc_limit < gc_params.malloc_limit_min) {
8802 malloc_limit = gc_params.malloc_limit_min;
8807 if (old_limit != malloc_limit) {
8808 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
8812 fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
8819#if RGENGC_ESTIMATE_OLDMALLOC
8821 if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
8822 objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
8823 objspace->rgengc.oldmalloc_increase_limit =
8824 (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
8826 if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
8827 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
8831 if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
8833 objspace->rgengc.need_major_gc,
8834 objspace->rgengc.oldmalloc_increase,
8835 objspace->rgengc.oldmalloc_increase_limit,
8836 gc_params.oldmalloc_limit_max);
8840 objspace->rgengc.oldmalloc_increase = 0;
8842 if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
8843 objspace->rgengc.oldmalloc_increase_limit =
8844 (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
8845 if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
8846 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
8854garbage_collect(
rb_objspace_t *objspace,
unsigned int reason)
8860#if GC_PROFILE_MORE_DETAIL
8861 objspace->profile.prepare_time = getrusage_time();
8866#if GC_PROFILE_MORE_DETAIL
8867 objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
8870 ret = gc_start(objspace, reason);
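/*
 * gc_start(): decide what kind of collection to run.  The reason flags,
 * objspace->rgengc.need_major_gc and the GC stress setting determine whether
 * this is a minor or major (full) mark, whether marking is incremental and
 * whether sweeping is immediate; the chosen flags are recorded in the
 * profile before marking begins under gc_enter()/gc_exit().
 */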
8880 unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
8881#if GC_ENABLE_INCREMENTAL_MARK
8882 unsigned int immediate_mark = reason & GPR_FLAG_IMMEDIATE_MARK;
8886 objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
8889 objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
8891 if (!heap_allocated_pages) return FALSE;
8892 if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;
8894 GC_ASSERT(gc_mode(objspace) == gc_mode_none);
8895 GC_ASSERT(!is_lazy_sweeping(objspace));
8896 GC_ASSERT(!is_incremental_marking(objspace));
8898 unsigned int lock_lev;
8899 gc_enter(objspace, gc_enter_event_start, &lock_lev);
8901#if RGENGC_CHECK_MODE >= 2
8902 gc_verify_internal_consistency(objspace);
8905 if (ruby_gc_stressful) {
8906 int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
8908 if ((flag & (1<<gc_stress_no_major)) == 0) {
8909 do_full_mark = TRUE;
8912 objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
8915 if (objspace->rgengc.need_major_gc) {
8916 reason |= objspace->rgengc.need_major_gc;
8917 do_full_mark = TRUE;
8919 else if (RGENGC_FORCE_MAJOR_GC) {
8920 reason = GPR_FLAG_MAJOR_BY_FORCE;
8921 do_full_mark = TRUE;
8924 objspace->rgengc.need_major_gc = GPR_FLAG_NONE;
8927 if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
8928 reason |= GPR_FLAG_MAJOR_BY_FORCE;
8931#if GC_ENABLE_INCREMENTAL_MARK
8932 if (!GC_ENABLE_INCREMENTAL_MARK || objspace->flags.dont_incremental || immediate_mark) {
8933 objspace->flags.during_incremental_marking = FALSE;
8936 objspace->flags.during_incremental_marking = do_full_mark;
8940 if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
8941 objspace->flags.immediate_sweep = TRUE;
8944 if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
8946 gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
8948 do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
8950#if USE_DEBUG_COUNTER
8951 RB_DEBUG_COUNTER_INC(gc_count);
8953 if (reason & GPR_FLAG_MAJOR_MASK) {
8954 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
8955 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
8956 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
8957 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
8958#if RGENGC_ESTIMATE_OLDMALLOC
8959 (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
8963 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
8964 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
8965 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
8966 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
8967 (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
8971 objspace->profile.count++;
8972 objspace->profile.latest_gc_info = reason;
8973 objspace->profile.total_allocated_objects_at_gc_start = objspace->total_allocated_objects;
8974 objspace->profile.heap_used_at_gc_start = heap_allocated_pages;
8975 gc_prof_setup_new_record(objspace, reason);
8976 gc_reset_malloc_info(objspace, do_full_mark);
8977 rb_transient_heap_start_marking(do_full_mark);
8980 GC_ASSERT(during_gc);
8982 gc_prof_timer_start(objspace);
8984 gc_marks(objspace, do_full_mark);
8986 gc_prof_timer_stop(objspace);
8988 gc_exit(objspace, gc_enter_event_start, &lock_lev);
8995 int marking = is_incremental_marking(objspace);
8996 int sweeping = is_lazy_sweeping(objspace);
8998 if (marking || sweeping) {
8999 unsigned int lock_lev;
9000 gc_enter(objspace, gc_enter_event_rest, &lock_lev);
9002 if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
9004 if (is_incremental_marking(objspace)) {
9005 gc_marks_rest(objspace);
9007 if (is_lazy_sweeping(objspace)) {
9008 gc_sweep_rest(objspace);
9010 gc_exit(objspace, gc_enter_event_rest, &lock_lev);
9016 unsigned int reason;
9023 if (is_marking(objspace)) {
9025 if (is_full_marking(objspace)) buff[i++] = 'F';
9026#if GC_ENABLE_INCREMENTAL_MARK
9027 if (is_incremental_marking(objspace)) buff[i++] = 'I';
9030 else if (is_sweeping(objspace)) {
9032 if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
9043 static char buff[0x10];
9044 gc_current_status_fill(objspace, buff);
9048#if PRINT_ENTER_EXIT_TICK
9050static tick_t last_exit_tick;
9051static tick_t enter_tick;
9052static int enter_count = 0;
9053static char last_gc_status[0x10];
9056gc_record(
rb_objspace_t *objspace,
int direction,
const char *event)
9058 if (direction == 0) {
9060 enter_tick = tick();
9061 gc_current_status_fill(objspace, last_gc_status);
9064 tick_t exit_tick = tick();
9065 char current_gc_status[0x10];
9066 gc_current_status_fill(objspace, current_gc_status);
9069 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9070 enter_tick - last_exit_tick,
9071 exit_tick - enter_tick,
9073 last_gc_status, current_gc_status,
9074 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9075 last_exit_tick = exit_tick;
9078 fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
9080 exit_tick - enter_tick,
9082 last_gc_status, current_gc_status,
9083 (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
9089gc_record(rb_objspace_t *objspace, int direction, const char *event)
9096gc_enter_event_cstr(enum gc_enter_event event)
9099 case gc_enter_event_start: return "start";
9100 case gc_enter_event_mark_continue: return "mark_continue";
9101 case gc_enter_event_sweep_continue: return "sweep_continue";
9102 case gc_enter_event_rest: return "rest";
9103 case gc_enter_event_finalizer: return "finalizer";
9104 case gc_enter_event_rb_memerror: return "rb_memerror";
9110gc_enter_count(enum gc_enter_event event)
9113 case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
9114 case gc_enter_event_mark_continue: RB_DEBUG_COUNTER_INC(gc_enter_mark_continue); break;
9115 case gc_enter_event_sweep_continue: RB_DEBUG_COUNTER_INC(gc_enter_sweep_continue); break;
9116 case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
9117 case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
9118 case gc_enter_event_rb_memerror: break;
9123#define MEASURE_GC (objspace->flags.measure_gc)
9127gc_enter_event_measure_p(
rb_objspace_t *objspace,
enum gc_enter_event event)
9129 if (!MEASURE_GC)
return false;
9132 case gc_enter_event_start:
9133 case gc_enter_event_mark_continue:
9134 case gc_enter_event_sweep_continue:
9135 case gc_enter_event_rest:
9145static bool current_process_time(
struct timespec *ts);
9148gc_enter_clock(
rb_objspace_t *objspace,
enum gc_enter_event event)
9150 if (gc_enter_event_measure_p(objspace, event)) {
9151 if (!current_process_time(&objspace->profile.start_time)) {
9152 objspace->profile.start_time.tv_sec = 0;
9153 objspace->profile.start_time.tv_nsec = 0;
9159gc_exit_clock(
rb_objspace_t *objspace,
enum gc_enter_event event)
9161 if (gc_enter_event_measure_p(objspace, event)) {
9164 if ((objspace->profile.start_time.tv_sec > 0 ||
9165 objspace->profile.start_time.tv_nsec > 0) &&
9166 current_process_time(&end_time)) {
9168 if (end_time.tv_sec < objspace->profile.start_time.tv_sec) {
9173 (uint64_t)(end_time.tv_sec - objspace->profile.start_time.tv_sec) * (1000 * 1000 * 1000) +
9174 (end_time.tv_nsec - objspace->profile.start_time.tv_nsec);
9175 objspace->profile.total_time_ns += ns;
9182gc_enter(
rb_objspace_t *objspace,
enum gc_enter_event event,
unsigned int *lock_lev)
9184 RB_VM_LOCK_ENTER_LEV(lock_lev);
9186 gc_enter_clock(objspace, event);
9189 case gc_enter_event_rest:
9190 if (!is_marking(objspace))
break;
9192 case gc_enter_event_start:
9193 case gc_enter_event_mark_continue:
9201 gc_enter_count(event);
9202 if (UNLIKELY(during_gc != 0))
rb_bug(
"during_gc != 0");
9203 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
9205 mjit_gc_start_hook();
9208 RUBY_DEBUG_LOG(
"%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
9209 gc_report(1, objspace,
"gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9210 gc_record(objspace, 0, gc_enter_event_cstr(event));
9215gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
9217 GC_ASSERT(during_gc != 0);
9220 gc_record(objspace, 1, gc_enter_event_cstr(event));
9221 RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
9222 gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
9225 mjit_gc_exit_hook();
9226 gc_exit_clock(objspace, event);
9227 RB_VM_LOCK_LEAVE_LEV(lock_lev);
9231gc_with_gvl(void *ptr)
9234 return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
9238garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
9240 if (dont_gc_val()) return TRUE;
9241 if (ruby_thread_has_gvl_p()) {
9242 return garbage_collect(objspace, reason);
9247 oar.objspace = objspace;
9248 oar.reason = reason;
9253 fprintf(stderr, "[FATAL] failed to allocate memory\n");
9260gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
9263 unsigned int reason = (GPR_FLAG_FULL_MARK |
9264 GPR_FLAG_IMMEDIATE_MARK |
9265 GPR_FLAG_IMMEDIATE_SWEEP |
9269 if (RTEST(compact)) {
9272#if !defined(__MINGW32__) && !defined(_WIN32)
9273 if (!USE_MMAP_ALIGNED_ALLOC) {
9274 rb_raise(rb_eNotImpError, "Compaction isn't available on this platform");
9278 reason |= GPR_FLAG_COMPACT;
9281 if (!RTEST(full_mark)) reason &= ~GPR_FLAG_FULL_MARK;
9282 if (!RTEST(immediate_mark)) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
9283 if (!RTEST(immediate_sweep)) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
9286 garbage_collect(objspace, reason);
9287 gc_finalize_deferred(objspace);
9304 if (DYNAMIC_SYM_P(obj) && (RSYMBOL(obj)->id & ~ID_SCOPE_MASK)) {
9332 if (st_is_member(finalizer_table, obj)) {
9336 GC_ASSERT(RVALUE_MARKED(obj));
9337 GC_ASSERT(!RVALUE_PINNED(obj));
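/*
 * gc_move(): compaction primitive.  Copies the slot contents of `scan` (src)
 * into `free` (dest), carries the mark/marking/uncollectible/WB-unprotected
 * bitmap bits and any generic ivar / object-id table entries across, and
 * leaves a T_MOVED forwarding cell at the old address whose destination
 * field points at the new location.
 */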
9350gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t slot_size)
9359 gc_report(4, objspace, "Moving object: %p -> %p\n", (void*)scan, (void *)free);
9362 GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(free), free));
9365 marked = rb_objspace_marked_object_p((VALUE)src);
9366 wb_unprotected = RVALUE_WB_UNPROTECTED((VALUE)src);
9367 uncollectible = RVALUE_UNCOLLECTIBLE((VALUE)src);
9368 marking = RVALUE_MARKING((VALUE)src);
9371 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)src), (VALUE)src);
9372 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)src), (VALUE)src);
9373 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)src), (VALUE)src);
9374 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)src), (VALUE)src);
9379 VALUE already_disabled = rb_gc_disable_no_rest();
9380 rb_mv_generic_ivar((VALUE)src, (VALUE)dest);
9381 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9384 st_data_t srcid = (st_data_t)src, id;
9388 if (st_lookup(objspace->obj_to_id_tbl, srcid, &id)) {
9389 gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
9393 VALUE already_disabled = rb_gc_disable_no_rest();
9394 st_delete(objspace->obj_to_id_tbl, &srcid, 0);
9395 st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
9396 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
9400 memcpy(dest, src, slot_size);
9401 memset(src, 0, slot_size);
9405 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9408 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS((VALUE)dest), (VALUE)dest);
9412 MARK_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9415 CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS((VALUE)dest), (VALUE)dest);
9418 if (wb_unprotected) {
9419 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9422 CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS((VALUE)dest), (VALUE)dest);
9425 if (uncollectible) {
9426 MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9429 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS((VALUE)dest), (VALUE)dest);
9433 src->as.moved.flags = T_MOVED;
9434 src->as.moved.dummy = Qundef;
9435 src->as.moved.destination = (VALUE)dest;
9442compare_free_slots(const void *left, const void *right, void *dummy)
9447 left_page = *(struct heap_page * const *)left;
9448 right_page = *(struct heap_page * const *)right;
9450 return left_page->free_slots - right_page->free_slots;
9456 for (int j = 0; j < SIZE_POOL_COUNT; j++) {
9459 size_t total_pages = SIZE_POOL_EDEN_HEAP(size_pool)->total_pages;
9460 size_t size = size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
9461 struct heap_page *page = 0, **page_list = malloc(size);
9464 list_for_each(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, page, page_node) {
9465 page_list[i++] = page;
9469 GC_ASSERT((size_t)i == total_pages);
9476 list_head_init(&SIZE_POOL_EDEN_HEAP(size_pool)->pages);
9478 for (i = 0; i < total_pages; i++) {
9479 list_add(&SIZE_POOL_EDEN_HEAP(size_pool)->pages, &page_list[i]->page_node);
9480 if (page_list[i]->free_slots != 0) {
9481 heap_add_freepage(SIZE_POOL_EDEN_HEAP(size_pool), page_list[i]);
9500 for (i = 0; i < len; i++) {
9501 UPDATE_IF_MOVED(objspace, ptr[i]);
9512 for (i = 0; i < len; i++) {
9513 UPDATE_IF_MOVED(objspace, ptr[i]);
9518hash_replace_ref(st_data_t *key, st_data_t *value, st_data_t argp,
int existing)
9522 if (gc_object_moved_p(objspace, (VALUE)*key)) {
9526 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9534hash_foreach_replace(st_data_t key, st_data_t value, st_data_t argp,
int error)
9540 if (gc_object_moved_p(objspace, (VALUE)key)) {
9544 if (gc_object_moved_p(objspace, (VALUE)value)) {
9551hash_replace_ref_value(st_data_t *key, st_data_t *value, st_data_t argp,
int existing)
9555 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9563hash_foreach_replace_value(st_data_t key, st_data_t value, st_data_t argp,
int error)
9569 if (gc_object_moved_p(objspace, (VALUE)value)) {
9578 if (!tbl || tbl->num_entries == 0)
return;
9580 if (st_foreach_with_replace(tbl, hash_foreach_replace_value, hash_replace_ref_value, (st_data_t)objspace)) {
9581 rb_raise(rb_eRuntimeError,
"hash modified during iteration");
9588 if (!tbl || tbl->num_entries == 0)
return;
9590 if (st_foreach_with_replace(tbl, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace)) {
9591 rb_raise(rb_eRuntimeError,
"hash modified during iteration");
9600 gc_update_table_refs(objspace, ptr);
9606 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
9614 UPDATE_IF_MOVED(objspace, me->owner);
9615 UPDATE_IF_MOVED(objspace, me->defined_class);
9618 switch (def->type) {
9619 case VM_METHOD_TYPE_ISEQ:
9623 TYPED_UPDATE_IF_MOVED(objspace,
rb_cref_t *, def->body.iseq.
cref);
9625 case VM_METHOD_TYPE_ATTRSET:
9626 case VM_METHOD_TYPE_IVAR:
9627 UPDATE_IF_MOVED(objspace, def->body.attr.location);
9629 case VM_METHOD_TYPE_BMETHOD:
9630 UPDATE_IF_MOVED(objspace, def->body.bmethod.proc);
9632 case VM_METHOD_TYPE_ALIAS:
9635 case VM_METHOD_TYPE_REFINED:
9637 UPDATE_IF_MOVED(objspace, def->body.refined.owner);
9639 case VM_METHOD_TYPE_CFUNC:
9640 case VM_METHOD_TYPE_ZSUPER:
9641 case VM_METHOD_TYPE_MISSING:
9642 case VM_METHOD_TYPE_OPTIMIZED:
9643 case VM_METHOD_TYPE_UNDEF:
9644 case VM_METHOD_TYPE_NOTIMPLEMENTED:
9651 gc_update_values(rb_objspace_t *objspace, long n, VALUE *values)
9655 for (i=0; i<n; i++) {
9656 UPDATE_IF_MOVED(objspace, values[i]);
9663 switch (imemo_type(obj)) {
9667 if (LIKELY(env->ep)) {
9669 TYPED_UPDATE_IF_MOVED(objspace, rb_iseq_t *, env->iseq);
9670 UPDATE_IF_MOVED(objspace, env->ep[VM_ENV_DATA_INDEX_ENV]);
9671 gc_update_values(objspace, (long)env->env_size, (VALUE *)env->env);
9676 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.klass_or_self);
9677 TYPED_UPDATE_IF_MOVED(objspace, struct rb_cref_struct *, RANY(obj)->as.imemo.cref.next);
9678 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.cref.refinements);
9681 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.cref_or_me);
9682 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.lastline);
9683 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.backref);
9684 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.svar.others);
9686 case imemo_throw_data:
9687 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.throw_data.throw_obj);
9692 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v1);
9693 UPDATE_IF_MOVED(objspace, RANY(obj)->as.imemo.memo.v2);
9696 gc_ref_update_method_entry(objspace, &RANY(obj)->as.imemo.ment);
9699 rb_iseq_update_references((rb_iseq_t *)obj);
9702 rb_ast_update_references((rb_ast_t *)obj);
9704 case imemo_callcache:
9708 UPDATE_IF_MOVED(objspace, cc->klass);
9709 if (!is_live_object(objspace, cc->klass)) {
9710 *((VALUE *)(&cc->klass)) = (VALUE)0;
9716 if (!is_live_object(objspace, (VALUE)cc->cme_)) {
9722 case imemo_constcache:
9725 UPDATE_IF_MOVED(objspace, ice->value);
9728 case imemo_parser_strterm:
9730 case imemo_callinfo:
9733 rb_bug("not reachable %d", imemo_type(obj));
9738static enum rb_id_table_iterator_result
9739 check_id_table_move(ID id, VALUE value, void *data)
9743 if (gc_object_moved_p(objspace, (VALUE)value)) {
9744 return ID_TABLE_REPLACE;
9747 return ID_TABLE_CONTINUE;
9759 void *poisoned = asan_poisoned_object_p(value);
9760 asan_unpoison_object(value, false);
9763 destination = (VALUE)RMOVED(value)->destination;
9767 destination = value;
9773 asan_poison_object(value);
9777 destination = value;
9783static enum rb_id_table_iterator_result
9784 update_id_table(ID *key, VALUE *value, void *data, int existing)
9788 if (gc_object_moved_p(objspace, (VALUE)*value)) {
9792 return ID_TABLE_CONTINUE;
9799 rb_id_table_foreach_with_replace(tbl, check_id_table_move, update_id_table, objspace);
9803static enum rb_id_table_iterator_result
9804 update_cc_tbl_i(ID id, VALUE ccs_ptr, void *data)
9808 VM_ASSERT(vm_ccs_p(ccs));
9810 if (gc_object_moved_p(objspace, (VALUE)ccs->cme)) {
9814 for (int i=0; i<ccs->len; i++) {
9815 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].ci)) {
9818 if (gc_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
9824 return ID_TABLE_CONTINUE;
9832 rb_id_table_foreach_with_replace(tbl, update_cc_tbl_i, 0, objspace);
9836static enum rb_id_table_iterator_result
9837 update_cvc_tbl_i(ID id, VALUE cvc_entry, void *data)
9845 return ID_TABLE_CONTINUE;
9853 rb_id_table_foreach_with_replace(tbl, update_cvc_tbl_i, 0, objspace);
9857static enum rb_id_table_iterator_result
9858 update_const_table(VALUE value, void *data)
9863 if (gc_object_moved_p(objspace, ce->value)) {
9867 if (gc_object_moved_p(objspace, ce->file)) {
9871 return ID_TABLE_CONTINUE;
9878 rb_id_table_foreach_values(tbl, update_const_table, objspace);
9885 UPDATE_IF_MOVED(objspace, entry->klass);
9886 entry = entry->next;
9891update_iv_index_tbl_i(st_data_t key, st_data_t value, st_data_t arg)
9895 UPDATE_IF_MOVED(objspace, ent->class_value);
9902 UPDATE_IF_MOVED(objspace, ext->origin_);
9903 UPDATE_IF_MOVED(objspace, ext->refined_class);
9904 update_subclass_entries(objspace, ext->subclasses);
9907 if (ext->iv_index_tbl) {
9908 st_foreach(ext->iv_index_tbl, update_iv_index_tbl_i, (st_data_t)objspace);
9913 gc_update_object_references(rb_objspace_t *objspace, VALUE obj)
9917 gc_report(4, objspace, "update-refs: %p ->\n", (void *)obj);
9923 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9925 if (!RCLASS_EXT(obj)) break;
9926 update_m_tbl(objspace, RCLASS_M_TBL(obj));
9927 update_cc_tbl(objspace, obj);
9928 update_cvc_tbl(objspace, obj);
9930 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9932 update_class_ext(objspace, RCLASS_EXT(obj));
9933 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
9937 if (FL_TEST(obj, RICLASS_IS_ORIGIN) &&
9938 !FL_TEST(obj, RICLASS_ORIGIN_SHARED_MTBL)) {
9939 update_m_tbl(objspace, RCLASS_M_TBL(obj));
9942 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
9944 if (!RCLASS_EXT(obj)) break;
9945 if (RCLASS_IV_TBL(obj)) {
9946 gc_update_tbl_refs(objspace, RCLASS_IV_TBL(obj));
9948 update_class_ext(objspace, RCLASS_EXT(obj));
9949 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
9950 update_cc_tbl(objspace, obj);
9954 gc_ref_update_imemo(objspace, obj);
9967 UPDATE_IF_MOVED(objspace, any->as.array.as.heap.aux.shared_root);
9970 gc_ref_update_array(objspace, obj);
9975 gc_ref_update_hash(objspace, obj);
9976 UPDATE_IF_MOVED(objspace, any->as.hash.ifnone);
9980 if (STR_SHARED_P(obj)) {
9982 VALUE orig_shared = any->as.string.as.heap.aux.shared;
9984 UPDATE_IF_MOVED(objspace, any->as.string.as.heap.aux.shared);
9986 VALUE shared = any->as.string.as.heap.aux.shared;
9987 if (STR_EMBED_P(shared)) {
9988 size_t offset = (size_t)any->as.string.as.heap.ptr - (size_t)RSTRING(orig_shared)->as.embed.ary;
9989 GC_ASSERT(any->as.string.as.heap.ptr >= RSTRING(orig_shared)->as.embed.ary);
9990 GC_ASSERT(offset <= (size_t)RSTRING(shared)->as.embed.len);
9991 any->as.string.as.heap.ptr = RSTRING(shared)->as.embed.ary + offset;
10004 if (compact_func) (*compact_func)(ptr);
10011 gc_ref_update_object(objspace, obj);
10015 if (any->as.file.fptr) {
10016 UPDATE_IF_MOVED(objspace, any->as.file.fptr->self);
10017 UPDATE_IF_MOVED(objspace, any->as.file.fptr->pathv);
10026 UPDATE_IF_MOVED(objspace, any->as.regexp.src);
10031 UPDATE_IF_MOVED(objspace, RSYMBOL(any)->fstr);
10040 UPDATE_IF_MOVED(objspace, any->as.match.regexp);
10042 if (any->as.match.str) {
10043 UPDATE_IF_MOVED(objspace, any->as.match.str);
10048 UPDATE_IF_MOVED(objspace, any->as.rational.num);
10049 UPDATE_IF_MOVED(objspace, any->as.rational.den);
10053 UPDATE_IF_MOVED(objspace, any->as.complex.real);
10054 UPDATE_IF_MOVED(objspace, any->as.complex.imag);
10061 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
10063 for (i = 0; i < len; i++) {
10064 UPDATE_IF_MOVED(objspace, ptr[i]);
10070 rb_gcdebug_print_obj_condition((VALUE)obj);
10071 rb_obj_info_dump(obj);
10078 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
10080 gc_report(4, objspace, "update-refs: %p <-\n", (void *)obj);
10086 VALUE v = (VALUE)vstart;
10087 asan_unpoison_memory_region(&page->freelist, sizeof(RVALUE*), false);
10088 asan_poison_memory_region(&page->freelist, sizeof(RVALUE*));
10089 page->flags.has_uncollectible_shady_objects = FALSE;
10090 page->flags.has_remembered_objects = FALSE;
10093 for (; v != (VALUE)vend; v += stride) {
10094 void *poisoned = asan_poisoned_object_p(v);
10095 asan_unpoison_object(v, false);
10103 if (RVALUE_WB_UNPROTECTED(v)) {
10104 page->flags.has_uncollectible_shady_objects = TRUE;
10106 if (RVALUE_PAGE_MARKING(page, v)) {
10107 page->flags.has_remembered_objects = TRUE;
10109 if (page->flags.before_sweep) {
10110 if (RVALUE_MARKED(v)) {
10111 gc_update_object_references(objspace, v);
10115 gc_update_object_references(objspace, v);
10120 asan_poison_object(v);
10128#define global_symbols ruby_global_symbols
10134 rb_vm_t *vm = rb_ec_vm_ptr(ec);
10138 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10139 bool should_set_mark_bits = TRUE;
10141 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10143 list_for_each(&heap->pages, page, page_node) {
10144 uintptr_t start = (uintptr_t)page->start;
10145 uintptr_t end = start + (page->total_slots * size_pool->slot_size);
10147 gc_ref_update((void *)start, (void *)end, size_pool->slot_size, objspace, page);
10148 if (page == heap->sweeping_page) {
10149 should_set_mark_bits = FALSE;
10151 if (should_set_mark_bits) {
10152 gc_setup_mark_bits(page);
10156 rb_vm_update_references(vm);
10157 rb_transient_heap_update_references();
10158 rb_gc_update_global_tbl();
10160 global_symbols.dsymbol_fstr_hash = rb_gc_location(global_symbols.dsymbol_fstr_hash);
10161 gc_update_tbl_refs(objspace, objspace->obj_to_id_tbl);
10162 gc_update_table_refs(objspace, objspace->id_to_obj_tbl);
10163 gc_update_table_refs(objspace, global_symbols.str_sym);
10164 gc_update_table_refs(objspace, finalizer_table);
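/* Illustrative sketch (not from gc.c) of the UPDATE_IF_MOVED pattern used
 * throughout this reference-updating pass: after an object is copied, a
 * forwarding record left at the old address remembers the new one, and a
 * reference slot is rewritten only if it still points at such a record.
 * All demo_* names are made up for the example. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_obj {
    bool moved;                    /* true once the object has been copied away   */
    struct demo_obj *destination;  /* forwarding pointer, valid only when moved   */
    int payload;
};

/* Equivalent of UPDATE_IF_MOVED for a single reference slot. */
static void
demo_update_if_moved(struct demo_obj **slot)
{
    if ((*slot)->moved) *slot = (*slot)->destination;
}

int
main(void)
{
    struct demo_obj old_slot = { false, NULL, 42 };
    struct demo_obj new_slot;

    new_slot = old_slot;               /* "compaction": copy the object            */
    old_slot.moved = true;             /* ...and leave a forwarding record behind  */
    old_slot.destination = &new_slot;

    struct demo_obj *ref = &old_slot;  /* a stale reference into the old page      */
    demo_update_if_moved(&ref);
    assert(ref == &new_slot && ref->payload == 42);
    return 0;
}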
10176 for (i=0; i<T_MASK; i++) {
10177 if (objspace->rcompactor.considered_count_table[i]) {
10181 if (objspace->rcompactor.moved_count_table[i]) {
10193 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
10196 rb_bug("ROOT %s points to MOVED: %p -> %s\n", category, (void *)obj, obj_info(rb_gc_location(obj)));
10201 reachable_object_check_moved_i(VALUE ref, void *data)
10203 VALUE parent = (VALUE)data;
10205 rb_bug("Object %s points to MOVED: %p -> %s\n", obj_info(parent), (void *)ref, obj_info(rb_gc_location(ref)));
10210 heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
10212 VALUE v = (VALUE)vstart;
10213 for (; v != (VALUE)vend; v += stride) {
10218 void *poisoned = asan_poisoned_object_p(v);
10219 asan_unpoison_object(v, false);
10226 if (!rb_objspace_garbage_object_p(v)) {
10227 rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
10233 asan_poison_object(v);
10247 return gc_compact_stats(ec, self);
10251 gc_verify_compaction_references(rb_execution_context_t *ec, VALUE self, VALUE double_heap, VALUE toward_empty)
10258 RB_VM_LOCK_ENTER();
10262 if (RTEST(double_heap)) {
10263 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10265 rb_heap_t *heap = SIZE_POOL_EDEN_HEAP(size_pool);
10266 heap_add_pages(objspace, size_pool, heap, heap->total_pages);
10270 if (RTEST(toward_empty)) {
10271 gc_sort_heap_by_empty_slots(objspace);
10274 RB_VM_LOCK_LEAVE();
10278 objspace_reachable_objects_from_root(objspace, root_obj_check_moved_i, NULL);
10279 objspace_each_objects(objspace, heap_check_moved_i, NULL, TRUE);
10281 return gc_compact_stats(ec, self);
10295 unsigned int reason = GPR_DEFAULT_REASON;
10296 garbage_collect(objspace, reason);
10306#if RGENGC_PROFILE >= 2
10308 static const char *type_name(int type, VALUE obj);
10311 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
10313 VALUE result = rb_hash_new_with_size(T_MASK);
10315 for (i=0; i<T_MASK; i++) {
10316 const char *type = type_name(i, 0);
10336 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
10338 static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state;
10339 static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
10340#if RGENGC_ESTIMATE_OLDMALLOC
10341 static VALUE sym_oldmalloc;
10343 static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
10344 static VALUE sym_none, sym_marking, sym_sweeping;
10347 unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
10353 hash = hash_or_key;
10356 rb_raise(rb_eTypeError, "non-hash or symbol given");
10359 if (NIL_P(sym_major_by)) {
10360#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
10363 S(immediate_sweep);
10372#if RGENGC_ESTIMATE_OLDMALLOC
10386#define SET(name, attr) \
10387 if (key == sym_##name) \
10389 else if (hash != Qnil) \
10390 rb_hash_aset(hash, sym_##name, (attr));
10393 (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
10394 (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
10395 (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
10396 (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
10397#if RGENGC_ESTIMATE_OLDMALLOC
10398 (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
10401 SET(major_by, major_by);
10404 (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
10405 (flags & GPR_FLAG_MALLOC) ? sym_malloc :
10406 (flags & GPR_FLAG_METHOD) ? sym_method :
10407 (flags & GPR_FLAG_CAPI) ? sym_capi :
10408 (flags & GPR_FLAG_STRESS) ? sym_stress :
10412 SET(have_finalizer, RBOOL(flags & GPR_FLAG_HAVE_FINALIZE));
10413 SET(immediate_sweep, RBOOL(flags & GPR_FLAG_IMMEDIATE_SWEEP));
10415 if (orig_flags == 0) {
10416 SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
10417 gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
10432 return gc_info_decode(objspace, key, 0);
10444 rb_raise(rb_eTypeError, "non-hash or symbol given");
10447 return gc_info_decode(objspace, arg, 0);
10453 gc_stat_sym_heap_allocated_pages,
10454 gc_stat_sym_heap_sorted_length,
10455 gc_stat_sym_heap_allocatable_pages,
10456 gc_stat_sym_heap_available_slots,
10457 gc_stat_sym_heap_live_slots,
10458 gc_stat_sym_heap_free_slots,
10459 gc_stat_sym_heap_final_slots,
10460 gc_stat_sym_heap_marked_slots,
10461 gc_stat_sym_heap_eden_pages,
10462 gc_stat_sym_heap_tomb_pages,
10463 gc_stat_sym_total_allocated_pages,
10464 gc_stat_sym_total_freed_pages,
10465 gc_stat_sym_total_allocated_objects,
10466 gc_stat_sym_total_freed_objects,
10467 gc_stat_sym_malloc_increase_bytes,
10468 gc_stat_sym_malloc_increase_bytes_limit,
10469 gc_stat_sym_minor_gc_count,
10470 gc_stat_sym_major_gc_count,
10471 gc_stat_sym_compact_count,
10472 gc_stat_sym_read_barrier_faults,
10473 gc_stat_sym_total_moved_objects,
10474 gc_stat_sym_remembered_wb_unprotected_objects,
10475 gc_stat_sym_remembered_wb_unprotected_objects_limit,
10476 gc_stat_sym_old_objects,
10477 gc_stat_sym_old_objects_limit,
10478#if RGENGC_ESTIMATE_OLDMALLOC
10479 gc_stat_sym_oldmalloc_increase_bytes,
10480 gc_stat_sym_oldmalloc_increase_bytes_limit,
10483 gc_stat_sym_total_generated_normal_object_count,
10484 gc_stat_sym_total_generated_shady_object_count,
10485 gc_stat_sym_total_shade_operation_count,
10486 gc_stat_sym_total_promoted_count,
10487 gc_stat_sym_total_remembered_normal_object_count,
10488 gc_stat_sym_total_remembered_shady_object_count,
10493static VALUE gc_stat_symbols[gc_stat_sym_last];
10496 setup_gc_stat_symbols(void)
10498 if (gc_stat_symbols[0] == 0) {
10499#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
10502 S(heap_allocated_pages);
10503 S(heap_sorted_length);
10504 S(heap_allocatable_pages);
10505 S(heap_available_slots);
10506 S(heap_live_slots);
10507 S(heap_free_slots);
10508 S(heap_final_slots);
10509 S(heap_marked_slots);
10510 S(heap_eden_pages);
10511 S(heap_tomb_pages);
10512 S(total_allocated_pages);
10513 S(total_freed_pages);
10514 S(total_allocated_objects);
10515 S(total_freed_objects);
10516 S(malloc_increase_bytes);
10517 S(malloc_increase_bytes_limit);
10521 S(read_barrier_faults);
10522 S(total_moved_objects);
10523 S(remembered_wb_unprotected_objects);
10524 S(remembered_wb_unprotected_objects_limit);
10526 S(old_objects_limit);
10527#if RGENGC_ESTIMATE_OLDMALLOC
10528 S(oldmalloc_increase_bytes);
10529 S(oldmalloc_increase_bytes_limit);
10532 S(total_generated_normal_object_count);
10533 S(total_generated_shady_object_count);
10534 S(total_shade_operation_count);
10535 S(total_promoted_count);
10536 S(total_remembered_normal_object_count);
10537 S(total_remembered_shady_object_count);
10544gc_stat_internal(VALUE hash_or_sym)
10549 setup_gc_stat_symbols();
10552 hash = hash_or_sym;
10558 rb_raise(rb_eTypeError, "non-hash or symbol argument");
10561#define SET(name, attr) \
10562 if (key == gc_stat_symbols[gc_stat_sym_##name]) \
10564 else if (hash != Qnil) \
10565 rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
10567 SET(count, objspace->profile.count);
10568 SET(time, (size_t) (objspace->profile.total_time_ns / (1000 * 1000) ));
10571 SET(heap_allocated_pages, heap_allocated_pages);
10572 SET(heap_sorted_length, heap_pages_sorted_length);
10573 SET(heap_allocatable_pages, heap_allocatable_pages(objspace));
10574 SET(heap_available_slots, objspace_available_slots(objspace));
10575 SET(heap_live_slots, objspace_live_slots(objspace));
10576 SET(heap_free_slots, objspace_free_slots(objspace));
10577 SET(heap_final_slots, heap_pages_final_slots);
10578 SET(heap_marked_slots, objspace->marked_slots);
10579 SET(heap_eden_pages, heap_eden_total_pages(objspace));
10580 SET(heap_tomb_pages, heap_tomb_total_pages(objspace));
10581 SET(total_allocated_pages, objspace->profile.total_allocated_pages);
10582 SET(total_freed_pages, objspace->profile.total_freed_pages);
10583 SET(total_allocated_objects, objspace->total_allocated_objects);
10584 SET(total_freed_objects, objspace->profile.total_freed_objects);
10585 SET(malloc_increase_bytes, malloc_increase);
10586 SET(malloc_increase_bytes_limit, malloc_limit);
10587 SET(minor_gc_count, objspace->profile.minor_gc_count);
10588 SET(major_gc_count, objspace->profile.major_gc_count);
10589 SET(compact_count, objspace->profile.compact_count);
10590 SET(read_barrier_faults, objspace->profile.read_barrier_faults);
10591 SET(total_moved_objects, objspace->rcompactor.total_moved);
10592 SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
10593 SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
10594 SET(old_objects, objspace->rgengc.old_objects);
10595 SET(old_objects_limit, objspace->rgengc.old_objects_limit);
10596#if RGENGC_ESTIMATE_OLDMALLOC
10597 SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
10598 SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
10602 SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
10603 SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
10604 SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
10605 SET(total_promoted_count, objspace->profile.total_promoted_count);
10606 SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
10607 SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
10615#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
10616 if (hash != Qnil) {
10617 gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
10618 gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
10619 gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
10620 gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
10621 gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
10622 gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
10636 size_t value = gc_stat_internal(arg);
10643 rb_raise(rb_eTypeError, "non-hash or symbol given");
10646 gc_stat_internal(arg);
10654 size_t value = gc_stat_internal(key);
10658 gc_stat_internal(key);
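/* Sketch (not from gc.c) of the symbol-or-hash dispatch that gc_stat_internal
 * and gc_info_decode both use: one SET-style macro either answers a single key
 * or fills in every entry of a table, so the list of stats is written only
 * once. The stat names and values below are made up. */
#include <stdio.h>
#include <string.h>

static size_t
demo_stat(const char *key, FILE *dump)
{
#define DEMO_SET(name, value) \
    if (key && strcmp(key, #name) == 0) return (value); \
    else if (dump) fprintf(dump, "%s: %zu\n", #name, (size_t)(value));

    DEMO_SET(minor_gc_count, 12);
    DEMO_SET(major_gc_count, 3);
    DEMO_SET(heap_live_slots, 40000);
#undef DEMO_SET

    return 0;   /* unknown key, or dump-everything mode */
}

int
main(void)
{
    printf("major_gc_count = %zu\n", demo_stat("major_gc_count", NULL));
    demo_stat(NULL, stdout);   /* prints every stat */
    return 0;
}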
10667 return ruby_gc_stress_mode;
10673 objspace->flags.gc_stressful = RTEST(flag);
10674 objspace->gc_stress_mode = flag;
10681 gc_stress_set(objspace, flag);
10689 return rb_objspace_gc_enable(objspace);
10695 int old = dont_gc_val();
10708 rb_gc_disable_no_rest(void)
10711 return gc_disable_no_rest(objspace);
10717 int old = dont_gc_val();
10726 return rb_objspace_gc_disable(objspace);
10733 return gc_disable_no_rest(objspace);
10747#if !defined(__MINGW32__) && !defined(_WIN32)
10748 if (!USE_MMAP_ALIGNED_ALLOC) {
10749 rb_raise(rb_eNotImpError, "Automatic compaction isn't available on this platform");
10753 ruby_enable_autocompact = RTEST(v);
10760 return RBOOL(ruby_enable_autocompact);
10764 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
10766 const char *ptr = getenv(name);
10769 if (ptr != NULL && *ptr) {
10772#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
10773 val = strtoll(ptr, &end, 0);
10775 val = strtol(ptr, &end, 0);
10778 case 'k': case 'K':
10782 case 'm': case 'M':
10786 case 'g': case 'G':
10787 unit = 1024*1024*1024;
10791 while (*end && isspace((unsigned char)*end)) end++;
10793 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10797 if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
10798 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
10803 if (val > 0 && (size_t)val > lower_bound) {
10805 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
10807 *default_value = (size_t)val;
10812 fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
10813 name, val, *default_value, lower_bound);
10822 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
10824 const char *ptr = getenv(name);
10827 if (ptr != NULL && *ptr) {
10829 val = strtod(ptr, &end);
10830 if (!*ptr || *end) {
10831 if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
10835 if (accept_zero && val == 0.0) {
10838 else if (val <= lower_bound) {
10840 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
10841 name, val, *default_value, lower_bound);
10844 else if (upper_bound != 0.0 &&
10845 val > upper_bound) {
10847 fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
10848 name, val, *default_value, upper_bound);
10858 if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
10859 *default_value = val;
10864 gc_set_initial_pages(void)
10871 min_pages = gc_params.heap_init_slots / HEAP_PAGE_OBJ_LIMIT;
10873 size_t pages_per_class = (min_pages - heap_eden_total_pages(objspace)) / SIZE_POOL_COUNT;
10875 for (int i = 0; i < SIZE_POOL_COUNT; i++) {
10878 heap_add_pages(objspace, size_pool, SIZE_POOL_EDEN_HEAP(size_pool), pages_per_class);
10881 heap_add_pages(objspace, &size_pools[0], SIZE_POOL_EDEN_HEAP(&size_pools[0]), min_pages - heap_eden_total_pages(objspace));
10927 ruby_gc_set_params(void)
10930 if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
10935 if (get_envparam_size("RUBY_GC_HEAP_INIT_SLOTS", &gc_params.heap_init_slots, 0)) {
10936 gc_set_initial_pages();
10939 get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
10940 get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
10941 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
10943 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
10944 gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
10945 get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
10946 gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
10947 get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
10949 get_envparam_size ("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0);
10950 get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
10951 if (!gc_params.malloc_limit_max) {
10952 gc_params.malloc_limit_max = SIZE_MAX;
10954 get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
10956#if RGENGC_ESTIMATE_OLDMALLOC
10957 if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
10959 objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
10961 get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
10962 get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
10967reachable_objects_from_callback(VALUE obj)
10970 cr->mfd->mark_func(obj, cr->mfd->data);
10974 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
10978 if (during_gc) rb_bug("rb_objspace_reachable_objects_from() is not supported while during_gc == true");
10980 if (is_markable_object(objspace, obj)) {
10982 struct gc_mark_func_data_struct mfd = {
10985 }, *prev_mfd = cr->mfd;
10988 gc_mark_children(objspace, obj);
10989 cr->mfd = prev_mfd;
10994 const char *category;
10995 void (*func)(const char *category, VALUE, void *);
11000 root_objects_from(VALUE obj, void *ptr)
11003 (*data->func)(data->category, obj, data->data);
11007 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
11010 objspace_reachable_objects_from_root(objspace, func, passing_data);
11014 objspace_reachable_objects_from_root(rb_objspace_t *objspace, void (func)(const char *category, VALUE, void *), void *passing_data)
11016 if (during_gc) rb_bug("objspace_reachable_objects_from_root() is not supported while during_gc == true");
11021 .data = passing_data,
11023 struct gc_mark_func_data_struct mfd = {
11024 .mark_func = root_objects_from,
11026 }, *prev_mfd = cr->mfd;
11029 gc_mark_roots(objspace, &data.category);
11030 cr->mfd = prev_mfd;
11044 gc_vraise(void *ptr)
11047 rb_vraise(argv->exc, argv->fmt, *argv->ap);
11052 gc_raise(VALUE exc, const char *fmt, ...)
11060 if (ruby_thread_has_gvl_p()) {
11070 fprintf(stderr, "%s", "[FATAL] ");
11071 vfprintf(stderr, fmt, ap);
11078 static void objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t size);
11081 negative_size_allocation_error(const char *msg)
11083 gc_raise(rb_eNoMemError, "%s", msg);
11087 ruby_memerror_body(void *dummy)
11093 NORETURN(static void ruby_memerror(void));
11098 if (ruby_thread_has_gvl_p()) {
11107 fprintf(stderr, "[FATAL] failed to allocate memory\n");
11110 exit(EXIT_FAILURE);
11117 rb_objspace_t *objspace = rb_objspace_of(rb_ec_vm_ptr(ec));
11128 gc_exit(objspace, gc_enter_event_rb_memerror, NULL);
11133 rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11134 fprintf(stderr, "[FATAL] failed to allocate memory\n");
11135 exit(EXIT_FAILURE);
11137 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
11138 rb_ec_raised_clear(ec);
11141 rb_ec_raised_set(ec, RAISED_NOMEMORY);
11142 exc = ruby_vm_special_exception_copy(exc);
11145 EC_JUMP_TAG(ec, TAG_RAISE);
11149 rb_aligned_malloc(size_t alignment, size_t size)
11153#if defined __MINGW32__
11154 res = __mingw_aligned_malloc(size, alignment);
11155#elif defined _WIN32
11156 void *_aligned_malloc(size_t, size_t);
11157 res = _aligned_malloc(size, alignment);
11159 if (USE_MMAP_ALIGNED_ALLOC) {
11160 GC_ASSERT(alignment % sysconf(_SC_PAGE_SIZE) == 0);
11162 char *ptr = mmap(NULL, alignment + size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
11163 if (ptr == MAP_FAILED) {
11167 char *aligned = ptr + alignment;
11168 aligned -= ((VALUE)aligned & (alignment - 1));
11169 GC_ASSERT(aligned > ptr);
11170 GC_ASSERT(aligned <= ptr + alignment);
11172 size_t start_out_of_range_size = aligned - ptr;
11173 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
11174 if (start_out_of_range_size > 0) {
11175 if (munmap(ptr, start_out_of_range_size)) {
11176 rb_bug("rb_aligned_malloc: munmap failed for start");
11180 size_t end_out_of_range_size = alignment - start_out_of_range_size;
11181 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
11182 if (end_out_of_range_size > 0) {
11183 if (munmap(aligned + size, end_out_of_range_size)) {
11184 rb_bug("rb_aligned_malloc: munmap failed for end");
11188 res = (void *)aligned;
11191# if defined(HAVE_POSIX_MEMALIGN)
11192 if (posix_memalign(&res, alignment, size) != 0) {
11195# elif defined(HAVE_MEMALIGN)
11196 res = memalign(alignment, size);
11199 res = malloc(alignment + size + sizeof(void*));
11200 aligned = (char*)res + alignment + sizeof(void*);
11201 aligned -= ((VALUE)aligned & (alignment - 1));
11202 ((void**)aligned)[-1] = res;
11203 res = (void*)aligned;
11209 GC_ASSERT(((alignment - 1) & alignment) == 0);
11210 GC_ASSERT(alignment % sizeof(void*) == 0);
11215 rb_aligned_free(void *ptr, size_t size)
11217#if defined __MINGW32__
11218 __mingw_aligned_free(ptr);
11219#elif defined _WIN32
11220 _aligned_free(ptr);
11222 if (USE_MMAP_ALIGNED_ALLOC) {
11223 GC_ASSERT(size % sysconf(_SC_PAGE_SIZE) == 0);
11224 if (munmap(ptr, size)) {
11225 rb_bug("rb_aligned_free: munmap failed");
11229# if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
11232 free(((void**)ptr)[-1]);
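/* Standalone sketch (not from gc.c) of the last-resort branch of
 * rb_aligned_malloc above: over-allocate, round the pointer up to the
 * requested power-of-two alignment, and stash the raw malloc() pointer one
 * slot below the aligned block so the matching free can recover it. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

static void *
demo_aligned_malloc(size_t alignment, size_t size)
{
    void *raw = malloc(size + alignment + sizeof(void *));
    if (!raw) return NULL;

    uintptr_t aligned = (uintptr_t)raw + sizeof(void *) + alignment;
    aligned &= ~(uintptr_t)(alignment - 1);       /* round down to the alignment */

    ((void **)aligned)[-1] = raw;                 /* remember what malloc() returned */
    return (void *)aligned;
}

static void
demo_aligned_free(void *ptr)
{
    if (ptr) free(((void **)ptr)[-1]);
}

int
main(void)
{
    void *p = demo_aligned_malloc(4096, 100);
    assert(p && ((uintptr_t)p & 4095) == 0);
    demo_aligned_free(p);
    return 0;
}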
11238 static inline size_t
11239 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
11241#ifdef HAVE_MALLOC_USABLE_SIZE
11242 return malloc_usable_size(ptr);
11249 MEMOP_TYPE_MALLOC = 0,
11255 atomic_sub_nounderflow(size_t *var, size_t sub)
11257 if (sub == 0) return;
11261 if (val < sub) sub = val;
11262 if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
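/* Self-contained C11 sketch (not from gc.c) of atomic_sub_nounderflow above:
 * clamp the amount subtracted to whatever is left, and retry with
 * compare-and-swap until one attempt wins, so a racing decrement can never
 * wrap the counter below zero. The real code uses Ruby's ATOMIC_SIZE_CAS
 * wrapper rather than <stdatomic.h>. */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>

static void
demo_sub_nounderflow(_Atomic size_t *var, size_t sub)
{
    if (sub == 0) return;

    for (;;) {
        size_t val = atomic_load(var);
        size_t amount = sub < val ? sub : val;   /* never subtract more than is left */
        if (atomic_compare_exchange_weak(var, &val, val - amount)) break;
        /* lost the race (or spurious failure): reload and retry */
    }
}

int
main(void)
{
    _Atomic size_t counter = 10;
    demo_sub_nounderflow(&counter, 25);          /* would underflow; clamps to 0 */
    assert(atomic_load(&counter) == 0);
    return 0;
}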
11270 unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
11271 GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
11273 if (gc_stress_full_mark_after_malloc_p()) {
11274 reason |= GPR_FLAG_FULL_MARK;
11276 garbage_collect_with_gvl(objspace, reason);
11281 objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11283 if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
11285 type == MEMOP_TYPE_MALLOC ? "malloc" :
11286 type == MEMOP_TYPE_FREE ? "free " :
11287 type == MEMOP_TYPE_REALLOC ? "realloc": "error",
11288 new_size, old_size);
11293 objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
11295 if (new_size > old_size) {
11296 ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
11297#if RGENGC_ESTIMATE_OLDMALLOC
11298 ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
11302 atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
11303#if RGENGC_ESTIMATE_OLDMALLOC
11304 atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
11308 if (type == MEMOP_TYPE_MALLOC) {
11311 if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
11315 garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
11319#if MALLOC_ALLOCATED_SIZE
11320 if (new_size >= old_size) {
11321 ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
11324 size_t dec_size = old_size - new_size;
11325 size_t allocated_size = objspace->malloc_params.allocated_size;
11327#if MALLOC_ALLOCATED_SIZE_CHECK
11328 if (allocated_size < dec_size) {
11329 rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
11332 atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
11336 case MEMOP_TYPE_MALLOC:
11337 ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
11339 case MEMOP_TYPE_FREE:
11341 size_t allocations = objspace->malloc_params.allocations;
11342 if (allocations > 0) {
11343 atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
11345#if MALLOC_ALLOCATED_SIZE_CHECK
11347 GC_ASSERT(objspace->malloc_params.allocations > 0);
11352 case MEMOP_TYPE_REALLOC: break;
11358#define objspace_malloc_increase(...) \
11359 for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
11360 !malloc_increase_done; \
11361 malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
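/* Sketch (not from gc.c) of the for-loop idiom behind the
 * objspace_malloc_increase macro above: the init clause runs a "before" step,
 * the statement or block attached at the call site runs as the loop body, and
 * the increment clause runs the "after" step exactly once before the loop
 * terminates. demo_before/demo_after are stand-ins for the report/body pair. */
#include <stdbool.h>
#include <stdio.h>

static bool demo_before(const char *tag) { printf("before %s\n", tag); return false; }
static bool demo_after(const char *tag)  { printf("after %s\n", tag);  return true;  }

#define DEMO_AROUND(tag) \
    for (bool demo_done = demo_before(tag); !demo_done; demo_done = demo_after(tag))

int
main(void)
{
    DEMO_AROUND("free") {
        printf("caller's block\n");   /* runs between before and after */
    }
    DEMO_AROUND("malloc");            /* also legal with no block attached */
    return 0;
}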
11365#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11372#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11373const char *ruby_malloc_info_file;
11374int ruby_malloc_info_line;
11377 static inline size_t
11378 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
11380 if (size == 0) size = 1;
11382#if CALC_EXACT_MALLOC_SIZE
11389 static inline void *
11390 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
11392 size = objspace_malloc_size(objspace, mem, size);
11393 objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC);
11395#if CALC_EXACT_MALLOC_SIZE
11399#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11400 info->gen = objspace->profile.count;
11401 info->file = ruby_malloc_info_file;
11402 info->line = info->file ? ruby_malloc_info_line : 0;
11411#if defined(__GNUC__) && RUBY_DEBUG
11412#define RB_BUG_INSTEAD_OF_RB_MEMERROR
11415#ifdef RB_BUG_INSTEAD_OF_RB_MEMERROR
11416#define TRY_WITH_GC(siz, expr) do { \
11417 const gc_profile_record_flag gpr = \
11418 GPR_FLAG_FULL_MARK | \
11419 GPR_FLAG_IMMEDIATE_MARK | \
11420 GPR_FLAG_IMMEDIATE_SWEEP | \
11422 objspace_malloc_gc_stress(objspace); \
11424 if (LIKELY((expr))) { \
11427 else if (!garbage_collect_with_gvl(objspace, gpr)) { \
11429 rb_bug("TRY_WITH_GC: could not GC"); \
11431 else if ((expr)) { \
11435 rb_bug("TRY_WITH_GC: could not allocate:" \
11436 "%"PRIdSIZE" bytes for %s", \
11441#define TRY_WITH_GC(siz, alloc) do { \
11442 objspace_malloc_gc_stress(objspace); \
11444 (!garbage_collect_with_gvl(objspace, GPR_FLAG_FULL_MARK | \
11445 GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP | \
11446 GPR_FLAG_MALLOC) || \
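/* Sketch (not from gc.c) of the retry shape that TRY_WITH_GC above wraps
 * around an allocation expression: try it once, and if it fails, force a GC
 * and try again before giving up. demo_full_gc is a stand-in, and the real
 * macro reports failure through Ruby's error machinery rather than exiting. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool demo_full_gc(void) { puts("running full GC"); return true; }

#define DEMO_TRY_WITH_GC(expr) do {                                     \
    if ((expr)) break;                  /* first attempt succeeded    */ \
    if (!demo_full_gc()) {              /* could not even collect     */ \
        fprintf(stderr, "GC failed\n"); exit(EXIT_FAILURE);              \
    }                                                                    \
    if (!(expr)) {                      /* still failing: give up     */ \
        fprintf(stderr, "out of memory\n"); exit(EXIT_FAILURE);          \
    }                                                                    \
} while (0)

int
main(void)
{
    void *mem = NULL;
    DEMO_TRY_WITH_GC(mem = malloc(64));
    free(mem);
    return 0;
}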
11461 size = objspace_malloc_prepare(objspace, size);
11462 TRY_WITH_GC(size, mem = malloc(size));
11463 RB_DEBUG_COUNTER_INC(heap_xmalloc);
11464 return objspace_malloc_fixup(objspace, mem, size);
11467 static inline size_t
11468 xmalloc2_size(const size_t count, const size_t elsize)
11470 return size_mul_or_raise(count, elsize, rb_eArgError);
11474 objspace_xrealloc(rb_objspace_t *objspace, void *ptr, size_t new_size, size_t old_size)
11478 if (!ptr) return objspace_xmalloc0(objspace, new_size);
11485 if (new_size == 0) {
11486 if ((mem = objspace_xmalloc0(objspace, 0)) != NULL) {
11509 objspace_xfree(objspace, ptr, old_size);
11523#if CALC_EXACT_MALLOC_SIZE
11528 old_size = info->size;
11532 old_size = objspace_malloc_size(objspace, ptr, old_size);
11533 TRY_WITH_GC(new_size, mem = realloc(ptr, new_size));
11534 new_size = objspace_malloc_size(objspace, mem, new_size);
11536#if CALC_EXACT_MALLOC_SIZE
11539 info->size = new_size;
11544 objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
11546 RB_DEBUG_COUNTER_INC(heap_xrealloc);
11550#if CALC_EXACT_MALLOC_SIZE && USE_GC_MALLOC_OBJ_INFO_DETAILS
11552#define MALLOC_INFO_GEN_SIZE 100
11553#define MALLOC_INFO_SIZE_SIZE 10
11554static size_t malloc_info_gen_cnt[MALLOC_INFO_GEN_SIZE];
11555static size_t malloc_info_gen_size[MALLOC_INFO_GEN_SIZE];
11556static size_t malloc_info_size[MALLOC_INFO_SIZE_SIZE+1];
11557static st_table *malloc_info_file_table;
11560mmalloc_info_file_i(st_data_t key, st_data_t val, st_data_t dmy)
11562 const char *file = (void *)key;
11563 const size_t *data = (void *)val;
11565 fprintf(stderr, "%s\t%"PRIdSIZE"\t%"PRIdSIZE"\n", file, data[0], data[1]);
11567 return ST_CONTINUE;
11570 __attribute__((destructor))
11572 rb_malloc_info_show_results(void)
11576 fprintf(stderr, "* malloc_info gen statistics\n");
11577 for (i=0; i<MALLOC_INFO_GEN_SIZE; i++) {
11578 if (i == MALLOC_INFO_GEN_SIZE-1) {
11579 fprintf(stderr, "more\t%"PRIdSIZE"\t%"PRIdSIZE"\n", malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11582 fprintf(stderr, "%d\t%"PRIdSIZE"\t%"PRIdSIZE"\n", i, malloc_info_gen_cnt[i], malloc_info_gen_size[i]);
11586 fprintf(stderr, "* malloc_info size statistics\n");
11587 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11589 fprintf(stderr, "%d\t%"PRIdSIZE"\n", s, malloc_info_size[i]);
11591 fprintf(stderr, "more\t%"PRIdSIZE"\n", malloc_info_size[i]);
11593 if (malloc_info_file_table) {
11594 fprintf(stderr, "* malloc_info file statistics\n");
11595 st_foreach(malloc_info_file_table, mmalloc_info_file_i, 0);
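/* Standalone sketch (not from gc.c) of the power-of-two size bucketing used
 * for the malloc_info size statistics above: each size is counted in the
 * first bin whose bound (16 << i) is large enough, with one extra bin for
 * everything bigger. */
#include <stdio.h>

#define DEMO_SIZE_BINS 10

static size_t demo_size_hist[DEMO_SIZE_BINS + 1];   /* last slot counts "more" */

static void
demo_count_size(size_t size)
{
    for (int i = 0; i < DEMO_SIZE_BINS; i++) {
        if (size <= ((size_t)16 << i)) {
            demo_size_hist[i]++;
            return;
        }
    }
    demo_size_hist[DEMO_SIZE_BINS]++;
}

int
main(void)
{
    size_t samples[] = { 8, 24, 100, 5000, 1u << 20 };
    for (int i = 0; i < 5; i++) demo_count_size(samples[i]);

    for (int i = 0; i < DEMO_SIZE_BINS; i++) {
        printf("<= %6zu\t%zu\n", (size_t)16 << i, demo_size_hist[i]);
    }
    printf("more\t%zu\n", demo_size_hist[DEMO_SIZE_BINS]);
    return 0;
}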
11600 rb_malloc_info_show_results(void)
11606 objspace_xfree(rb_objspace_t *objspace, void *ptr, size_t old_size)
11615#if CALC_EXACT_MALLOC_SIZE
11618 old_size = info->size;
11620#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11622 int gen = (int)(objspace->profile.count - info->gen);
11623 int gen_index = gen >= MALLOC_INFO_GEN_SIZE ? MALLOC_INFO_GEN_SIZE-1 : gen;
11626 malloc_info_gen_cnt[gen_index]++;
11627 malloc_info_gen_size[gen_index] += info->size;
11629 for (i=0; i<MALLOC_INFO_SIZE_SIZE; i++) {
11630 size_t s = 16 << i;
11631 if (info->size <= s) {
11632 malloc_info_size[i]++;
11636 malloc_info_size[i]++;
11640 st_data_t key = (st_data_t)info->file, d;
11643 if (malloc_info_file_table == NULL) {
11644 malloc_info_file_table = st_init_numtable_with_size(1024);
11646 if (st_lookup(malloc_info_file_table, key, &d)) {
11648 data = (size_t *)d;
11651 data = malloc(xmalloc2_size(2, sizeof(size_t)));
11652 if (data == NULL) rb_bug("objspace_xfree: can not allocate memory");
11653 data[0] = data[1] = 0;
11654 st_insert(malloc_info_file_table, key, (st_data_t)data);
11657 data[1] += info->size;
11659 if (0 && gen >= 2) {
11661 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d, pos: %s:%"PRIdSIZE"\n",
11662 info->size, gen, info->file, info->line);
11665 fprintf(stderr, "free - size:%"PRIdSIZE", gen:%d\n",
11672 old_size = objspace_malloc_size(objspace, ptr, old_size);
11674 objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
11676 RB_DEBUG_COUNTER_INC(heap_xfree);
11681 ruby_xmalloc0(size_t size)
11687 ruby_xmalloc_body(size_t size)
11689 if ((ssize_t)size < 0) {
11690 negative_size_allocation_error("too large allocation size");
11692 return ruby_xmalloc0(size);
11696 ruby_malloc_size_overflow(size_t count, size_t elsize)
11699 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
11704 ruby_xmalloc2_body(size_t n, size_t size)
11706 return objspace_xmalloc0(&rb_objspace, xmalloc2_size(n, size));
11714 size = objspace_malloc_prepare(objspace, size);
11715 TRY_WITH_GC(size, mem = calloc1(size));
11716 return objspace_malloc_fixup(objspace, mem, size);
11720 ruby_xcalloc_body(size_t n, size_t size)
11722 return objspace_xcalloc(&rb_objspace, xmalloc2_size(n, size));
11725#ifdef ruby_sized_xrealloc
11726#undef ruby_sized_xrealloc
11729 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
11731 if ((ssize_t)new_size < 0) {
11732 negative_size_allocation_error("too large allocation size");
11735 return objspace_xrealloc(&rb_objspace, ptr, new_size, old_size);
11739 ruby_xrealloc_body(void *ptr, size_t new_size)
11741 return ruby_sized_xrealloc(ptr, new_size, 0);
11744#ifdef ruby_sized_xrealloc2
11745#undef ruby_sized_xrealloc2
11748 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
11750 size_t len = xmalloc2_size(n, size);
11751 return objspace_xrealloc(&rb_objspace, ptr, len, old_n * size);
11755 ruby_xrealloc2_body(void *ptr, size_t n, size_t size)
11757 return ruby_sized_xrealloc2(ptr, n, size, 0);
11760#ifdef ruby_sized_xfree
11761#undef ruby_sized_xfree
11764 ruby_sized_xfree(void *x, size_t size)
11774 ruby_sized_xfree(x, 0);
11778 rb_xmalloc_mul_add(size_t x, size_t y, size_t z)
11780 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
11785 rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z)
11787 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
11792 rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
11794 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
11799 rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w)
11801 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
11809 ruby_mimmalloc(size_t size)
11812#if CALC_EXACT_MALLOC_SIZE
11815 mem = malloc(size);
11816#if CALC_EXACT_MALLOC_SIZE
11825#if USE_GC_MALLOC_OBJ_INFO_DETAILS
11837 ruby_mimfree(void *ptr)
11839#if CALC_EXACT_MALLOC_SIZE
11847 rb_alloc_tmp_buffer_with_count(volatile VALUE *store, size_t size, size_t cnt)
11855 imemo = rb_imemo_tmpbuf_auto_free_maybe_mark_buffer(NULL, 0);
11857 ptr = ruby_xmalloc0(size);
11865 rb_alloc_tmp_buffer(volatile VALUE *store, long len)
11869 if (len < 0 || (cnt = (long)roomof(len, sizeof(VALUE))) < 0) {
11870 rb_raise(rb_eArgError, "negative buffer size (or size too big)");
11873 return rb_alloc_tmp_buffer_with_count(store, len, cnt);
11877 rb_free_tmp_buffer(volatile VALUE *store)
11881 void *ptr = ATOMIC_PTR_EXCHANGE(s->ptr, 0);
11887#if MALLOC_ALLOCATED_SIZE
11898 gc_malloc_allocated_size(VALUE self)
11913 gc_malloc_allocations(VALUE self)
11924 objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
11926 else if (diff < 0) {
11927 objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
11941#define WMAP_DELETE_DEAD_OBJECT_IN_MARK 0
11943#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11945wmap_mark_map(st_data_t key, st_data_t val, st_data_t arg)
11948 VALUE obj = (VALUE)val;
11949 if (!is_live_object(objspace, obj)) return ST_DELETE;
11950 return ST_CONTINUE;
11955 wmap_compact(void *ptr)
11964 wmap_mark(void *ptr)
11967#if WMAP_DELETE_DEAD_OBJECT_IN_MARK
11974wmap_free_map(st_data_t key, st_data_t val, st_data_t arg)
11976 VALUE *ptr = (VALUE *)val;
11977 ruby_sized_xfree(ptr, (ptr[0] + 1) * sizeof(VALUE));
11978 return ST_CONTINUE;
11982 wmap_free(void *ptr)
11986 st_free_table(w->obj2wmap);
11987 st_free_table(w->wmap2obj);
11991wmap_memsize_map(st_data_t key, st_data_t val, st_data_t arg)
11993 VALUE *ptr = (VALUE *)val;
11994 *(size_t *)arg += (ptr[0] + 1) * sizeof(VALUE);
11995 return ST_CONTINUE;
11999 wmap_memsize(const void *ptr)
12002 const struct weakmap *w = ptr;
12004 size += st_memsize(w->obj2wmap);
12005 size += st_memsize(w->wmap2obj);
12006 st_foreach(w->obj2wmap, wmap_memsize_map, (st_data_t)&size);
12018 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
12024wmap_allocate(VALUE klass)
12028 w->obj2wmap = rb_init_identtable();
12029 w->wmap2obj = rb_init_identtable();
12030 w->final = rb_func_lambda_new(wmap_finalize, obj, 1, 1);
12040 if (!is_pointer_to_heap(objspace, (void *)obj)) return FALSE;
12042 void *poisoned = asan_unpoison_object_temporary(obj);
12046 is_live_object(objspace, obj));
12049 asan_poison_object(obj);
12056 wmap_final_func(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
12058 VALUE wmap, *ptr, size, i, j;
12059 if (!existing) return ST_STOP;
12060 wmap = (VALUE)arg, ptr = (VALUE *)*value;
12061 for (i = j = 1, size = ptr[0]; i <= size; ++i) {
12062 if (ptr[i] != wmap) {
12067 ruby_sized_xfree(ptr, i * sizeof(VALUE));
12071 SIZED_REALLOC_N(ptr, VALUE, j + 1, i);
12073 *value = (st_data_t)ptr;
12075 return ST_CONTINUE;
12082 st_data_t orig, wmap, data;
12083 VALUE obj, *rids, i, size;
12089 rb_bug("wmap_finalize: objid is not found.");
12093 orig = (st_data_t)obj;
12094 if (st_delete(w->obj2wmap, &orig, &data)) {
12095 rids = (VALUE *)data;
12097 for (i = 0; i < size; ++i) {
12098 wmap = (st_data_t)rids[i];
12099 st_delete(w->wmap2obj, &wmap, NULL);
12101 ruby_sized_xfree((VALUE *)data, (size + 1) * sizeof(VALUE));
12104 wmap = (st_data_t)obj;
12105 if (st_delete(w->wmap2obj, &wmap, &orig)) {
12106 wmap = (st_data_t)obj;
12107 st_update(w->obj2wmap, orig, wmap_final_func, wmap);
12118 wmap_inspect_append(rb_objspace_t *objspace, VALUE str, VALUE obj)
12123 else if (wmap_live_p(objspace, obj)) {
12127 return rb_str_catf(str, "#<collected:%p>", (void*)obj);
12132wmap_inspect_i(st_data_t key, st_data_t val, st_data_t arg)
12136 VALUE str = argp->value;
12137 VALUE k = (VALUE)key, v = (VALUE)val;
12146 wmap_inspect_append(objspace, str, k);
12148 wmap_inspect_append(objspace, str, v);
12150 return ST_CONTINUE;
12154 wmap_inspect(VALUE self)
12162 str = rb_sprintf("-<%"PRIsVALUE":%p", c, (void *)self);
12166 st_foreach(w->wmap2obj, wmap_inspect_i, (st_data_t)&args);
12174 wmap_each_i(st_data_t key, st_data_t val, st_data_t arg)
12177 VALUE obj = (VALUE)val;
12178 if (wmap_live_p(objspace, obj)) {
12181 return ST_CONTINUE;
12186 wmap_each(VALUE self)
12192 st_foreach(w->wmap2obj, wmap_each_i, (st_data_t)objspace);
12197 wmap_each_key_i(st_data_t key, st_data_t val, st_data_t arg)
12200 VALUE obj = (VALUE)val;
12201 if (wmap_live_p(objspace, obj)) {
12204 return ST_CONTINUE;
12209 wmap_each_key(VALUE self)
12215 st_foreach(w->wmap2obj, wmap_each_key_i, (st_data_t)objspace);
12220 wmap_each_value_i(st_data_t key, st_data_t val, st_data_t arg)
12223 VALUE obj = (VALUE)val;
12224 if (wmap_live_p(objspace, obj)) {
12227 return ST_CONTINUE;
12232 wmap_each_value(VALUE self)
12238 st_foreach(w->wmap2obj, wmap_each_value_i, (st_data_t)objspace);
12243 wmap_keys_i(st_data_t key, st_data_t val, st_data_t arg)
12247 VALUE ary = argp->value;
12248 VALUE obj = (VALUE)val;
12249 if (wmap_live_p(objspace, obj)) {
12252 return ST_CONTINUE;
12257 wmap_keys(VALUE self)
12265 st_foreach(w->wmap2obj, wmap_keys_i, (st_data_t)&args);
12270 wmap_values_i(st_data_t key, st_data_t val, st_data_t arg)
12274 VALUE ary = argp->value;
12275 VALUE obj = (VALUE)val;
12276 if (wmap_live_p(objspace, obj)) {
12279 return ST_CONTINUE;
12284 wmap_values(VALUE self)
12292 st_foreach(w->wmap2obj, wmap_values_i, (st_data_t)&args);
12297 wmap_aset_update(st_data_t *key, st_data_t *val, st_data_t arg, int existing)
12299 VALUE size, *ptr, *optr;
12301 size = (ptr = optr = (VALUE *)*val)[0];
12303 SIZED_REALLOC_N(ptr, VALUE, size + 1, size);
12308 ptr = ruby_xmalloc0(2 * sizeof(VALUE));
12311 ptr[size] = (VALUE)arg;
12312 if (ptr == optr) return ST_STOP;
12313 *val = (st_data_t)ptr;
12314 return ST_CONTINUE;
12319 wmap_aset(VALUE self, VALUE key, VALUE value)
12325 define_final0(value, w->final);
12328 define_final0(key, w->final);
12331 st_update(w->obj2wmap, (st_data_t)value, wmap_aset_update, key);
12332 st_insert(w->wmap2obj, (st_data_t)key, (st_data_t)value);
12333 return nonspecial_obj_id(value);
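/* Standalone sketch (not from gc.c) of the length-prefixed array that
 * wmap_aset_update above maintains per value: slot 0 holds the element count
 * and the remaining slots hold the entries; appending reallocates the array
 * one slot larger. Plain realloc stands in for SIZED_REALLOC_N. */
#include <assert.h>
#include <stdlib.h>

/* arr[0] is the count; arr[1..count] are the entries. Returns the possibly
 * moved array, or NULL if the reallocation fails. */
static size_t *
demo_append(size_t *arr, size_t entry)
{
    size_t count = arr ? arr[0] : 0;
    size_t *grown = realloc(arr, (count + 2) * sizeof(size_t));
    if (!grown) return NULL;

    grown[0] = count + 1;
    grown[count + 1] = entry;
    return grown;
}

int
main(void)
{
    size_t *arr = NULL;
    arr = demo_append(arr, 111);
    arr = demo_append(arr, 222);
    assert(arr && arr[0] == 2 && arr[1] == 111 && arr[2] == 222);
    free(arr);
    return 0;
}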
12338 wmap_lookup(VALUE self, VALUE key)
12346 if (!st_lookup(w->wmap2obj, (st_data_t)key, &data)) return Qundef;
12348 if (!wmap_live_p(objspace, obj)) return Qundef;
12354 wmap_aref(VALUE self, VALUE key)
12356 VALUE obj = wmap_lookup(self, key);
12362 wmap_has_key(VALUE self, VALUE key)
12369 wmap_size(VALUE self)
12375 n = w->wmap2obj->num_entries;
12376#if SIZEOF_ST_INDEX_T <= SIZEOF_LONG
12387#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
12390 current_process_time(struct timespec *ts)
12392#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
12394 static int try_clock_gettime = 1;
12395 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
12399 try_clock_gettime = 0;
12406 struct rusage usage;
12408 if (getrusage(RUSAGE_SELF, &usage) == 0) {
12409 time = usage.ru_utime;
12410 ts->tv_sec = time.tv_sec;
12411 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
12419 FILETIME creation_time, exit_time, kernel_time, user_time;
12422 if (GetProcessTimes(GetCurrentProcess(),
12423 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
12424 memcpy(&ui, &user_time, sizeof(FILETIME));
12425#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
12426 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
12427 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
12437 getrusage_time(void)
12440 if (current_process_time(&ts)) {
12441 return ts.tv_sec + ts.tv_nsec * 1e-9;
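/* Minimal POSIX sketch (not from gc.c) of the measurement current_process_time
 * and getrusage_time above perform: per-process CPU time via
 * clock_gettime(CLOCK_PROCESS_CPUTIME_ID), converted to floating-point
 * seconds. The getrusage() and Win32 fallbacks are omitted here. */
#include <stdio.h>
#include <time.h>

static double
demo_process_cpu_time(void)
{
#if defined(CLOCK_PROCESS_CPUTIME_ID)
    struct timespec ts;
    if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif
    return -1.0;   /* clock unavailable */
}

int
main(void)
{
    volatile unsigned long x = 0;
    double before = demo_process_cpu_time();
    for (unsigned long i = 0; i < 10000000UL; i++) x += i;   /* burn a little CPU */
    printf("cpu seconds: %f\n", demo_process_cpu_time() - before);
    return 0;
}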
12450 gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
12452 if (objspace->profile.run) {
12453 size_t index = objspace->profile.next_index;
12457 objspace->profile.next_index++;
12459 if (!objspace->profile.records) {
12460 objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
12461 objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
12463 if (index >= objspace->profile.size) {
12465 objspace->profile.size += 1000;
12466 ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
12468 objspace->profile.records = ptr;
12470 if (!objspace->profile.records) {
12471 rb_bug("gc_profile malloc or realloc miss");
12473 record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
12477 record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
12478#if MALLOC_ALLOCATED_SIZE
12479 record->allocated_size = malloc_allocated_size;
12481#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
12484 struct rusage usage;
12485 if (getrusage(RUSAGE_SELF, &usage) == 0) {
12486 record->maxrss = usage.ru_maxrss;
12487 record->minflt = usage.ru_minflt;
12488 record->majflt = usage.ru_majflt;
12499 if (gc_prof_enabled(objspace)) {
12501#if GC_PROFILE_MORE_DETAIL
12502 record->prepare_time = objspace->profile.prepare_time;
12504 record->gc_time = 0;
12505 record->gc_invoke_time = getrusage_time();
12510 elapsed_time_from(double time)
12512 double now = getrusage_time();
12524 if (gc_prof_enabled(objspace)) {
12526 record->gc_time = elapsed_time_from(record->gc_invoke_time);
12527 record->gc_invoke_time -= objspace->profile.invoke_time;
12531#define RUBY_DTRACE_GC_HOOK(name) \
12532 do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
12536 RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
12537#if GC_PROFILE_MORE_DETAIL
12538 if (gc_prof_enabled(objspace)) {
12539 gc_prof_record(objspace)->gc_mark_time = getrusage_time();
12547 RUBY_DTRACE_GC_HOOK(MARK_END);
12548#if GC_PROFILE_MORE_DETAIL
12549 if (gc_prof_enabled(objspace)) {
12551 record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
12559 RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
12560 if (gc_prof_enabled(objspace)) {
12563 if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
12564 objspace->profile.gc_sweep_start_time = getrusage_time();
12572 RUBY_DTRACE_GC_HOOK(SWEEP_END);
12574 if (gc_prof_enabled(objspace)) {
12578 if (record->gc_time > 0) {
12579 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12581 record->gc_time += sweep_time;
12583 else if (GC_PROFILE_MORE_DETAIL) {
12584 sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
12587#if GC_PROFILE_MORE_DETAIL
12588 record->gc_sweep_time += sweep_time;
12589 if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
12591 if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
12598#if GC_PROFILE_MORE_DETAIL
12599 if (gc_prof_enabled(objspace)) {
12601 record->allocate_increase = malloc_increase;
12602 record->allocate_limit = malloc_limit;
12610 if (gc_prof_enabled(objspace)) {
12612 size_t live = objspace->profile.total_allocated_objects_at_gc_start - objspace->profile.total_freed_objects;
12613 size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
12615#if GC_PROFILE_MORE_DETAIL
12616 record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
12617 record->heap_live_objects = live;
12618 record->heap_free_objects = total - live;
12621 record->heap_total_objects = total;
12622 record->heap_use_size = live * sizeof(RVALUE);
12623 record->heap_total_size = total * sizeof(RVALUE);
12636 gc_profile_clear(VALUE _)
12639 void *p = objspace->profile.records;
12640 objspace->profile.records = NULL;
12641 objspace->profile.size = 0;
12642 objspace->profile.next_index = 0;
12643 objspace->profile.current_record = 0;
12701 gc_profile_record_get(VALUE _)
12708 if (!objspace->profile.run) {
12712 for (i =0; i < objspace->profile.next_index; i++) {
12724#if GC_PROFILE_MORE_DETAIL
12739#if RGENGC_PROFILE > 0
12750#if GC_PROFILE_MORE_DETAIL
12751#define MAJOR_REASON_MAX 0x10
12754 gc_profile_dump_major_reason(unsigned int flags, char *buff)
12756 unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
12759 if (reason == GPR_FLAG_NONE) {
12765 if (reason & GPR_FLAG_MAJOR_BY_##x) { \
12766 buff[i++] = #x[0]; \
12767 if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
12773#if RGENGC_ESTIMATE_OLDMALLOC
12783gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
12786 size_t count = objspace->profile.next_index;
12787#ifdef MAJOR_REASON_MAX
12788 char reason_str[MAJOR_REASON_MAX];
12791 if (objspace->profile.run && count ) {
12795 append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
12796 append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
12798 for (i = 0; i < count; i++) {
12799 record = &objspace->profile.records[i];
12800 append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
12801 i+1, record->gc_invoke_time, record->heap_use_size,
12802 record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
12805#if GC_PROFILE_MORE_DETAIL
12806 const char *str = "\n\n" \
12808 "Prepare Time = Previously GC's rest sweep time\n"
12809 "Index Flags Allocate Inc. Allocate Limit"
12810#if CALC_EXACT_MALLOC_SIZE
12813 " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
12815 " OldgenObj RemNormObj RemShadObj"
12817#if GC_PROFILE_DETAIL_MEMORY
12818 " MaxRSS(KB) MinorFLT MajorFLT"
12823 for (i = 0; i < count; i++) {
12824 record = &objspace->profile.records[i];
12825 append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
12826#if CALC_EXACT_MALLOC_SIZE
12829 " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
12831 "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
12833#if GC_PROFILE_DETAIL_MEMORY
12839 gc_profile_dump_major_reason(record->flags, reason_str),
12840 (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
12841 (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
12842 (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
12843 (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
12844 (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
12845 (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
12846 record->allocate_increase, record->allocate_limit,
12847#if CALC_EXACT_MALLOC_SIZE
12848 record->allocated_size,
12850 record->heap_use_pages,
12851 record->gc_mark_time*1000,
12852 record->gc_sweep_time*1000,
12853 record->prepare_time*1000,
12855 record->heap_live_objects,
12856 record->heap_free_objects,
12857 record->removing_objects,
12858 record->empty_objects
12861 record->old_objects,
12862 record->remembered_normal_objects,
12863 record->remembered_shady_objects
12865#if GC_PROFILE_DETAIL_MEMORY
12867 record->maxrss / 1024,
12890 gc_profile_result(VALUE _)
12907 gc_profile_report(int argc, VALUE *argv, VALUE self)
12925 gc_profile_total_time(VALUE self)
12930 if (objspace->profile.run && objspace->profile.next_index > 0) {
12932 size_t count = objspace->profile.next_index;
12934 for (i = 0; i < count; i++) {
12935 time += objspace->profile.records[i].gc_time;
12949 gc_profile_enable_get(VALUE self)
12952 return RBOOL(objspace->profile.run);
12964 gc_profile_enable(VALUE _)
12967 objspace->profile.run = TRUE;
12968 objspace->profile.current_record = 0;
12981 gc_profile_disable(VALUE _)
12985 objspace->profile.run = FALSE;
12986 objspace->profile.current_record = 0;
12995 type_name(int type, VALUE obj)
12998#define TYPE_NAME(t) case (t): return #t;
13025 if (obj && rb_objspace_data_type_name(obj)) {
13026 return rb_objspace_data_type_name(obj);
13035obj_type_name(VALUE obj)
13037 return type_name(TYPE(obj), obj);
13041 rb_method_type_name(rb_method_type_t type)
13044 case VM_METHOD_TYPE_ISEQ: return "iseq";
13045 case VM_METHOD_TYPE_ATTRSET: return "attrset";
13046 case VM_METHOD_TYPE_IVAR: return "ivar";
13047 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
13048 case VM_METHOD_TYPE_ALIAS: return "alias";
13049 case VM_METHOD_TYPE_REFINED: return "refined";
13050 case VM_METHOD_TYPE_CFUNC: return "cfunc";
13051 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
13052 case VM_METHOD_TYPE_MISSING: return "missing";
13053 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
13054 case VM_METHOD_TYPE_UNDEF: return "undef";
13055 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
13057 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
13061# define ARY_SHARED_P(ary) \
13062 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13063 FL_TEST((ary),ELTS_SHARED)!=0)
13064# define ARY_EMBED_P(ary) \
13065 (GC_ASSERT(!FL_TEST((ary), ELTS_SHARED) || !FL_TEST((ary), RARRAY_EMBED_FLAG)), \
13066 FL_TEST((ary), RARRAY_EMBED_FLAG)!=0)
13069 rb_raw_iseq_info(char *buff, const int buff_size, const rb_iseq_t *iseq)
13071 if (buff_size > 0 && iseq->body && iseq->body->location.label && !RB_TYPE_P(iseq->body->location.pathobj, T_MOVED)) {
13072 VALUE path = rb_iseq_path(iseq);
13073 VALUE n = iseq->body->location.first_lineno;
13074 snprintf(buff, buff_size, " %s@%s:%d",
13082str_len_no_raise(VALUE str)
13085 if (len < 0) return 0;
13086 if (len > INT_MAX) return INT_MAX;
rb_raw_obj_info(char *buff, const int buff_size, VALUE obj)

    void *poisoned = asan_poisoned_object_p(obj);
    asan_unpoison_object(obj, false);

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPENDF(f) if ((pos += snprintf f) >= buff_size) goto end

    APPENDF((BUFF_ARGS, "%s", obj_type_name(obj)));

        APPENDF((BUFF_ARGS, " %ld", FIX2LONG(obj)));

#define TF(c) ((c) != 0 ? "true" : "false")
#define C(c, s) ((c) != 0 ? (s) : " ")
        const int age = RVALUE_FLAGS_AGE(RBASIC(obj)->flags);

        if (is_pointer_to_heap(&rb_objspace, (void *)obj)) {
            APPENDF((BUFF_ARGS, "%p [%d%s%s%s%s%s%s] %s ",
                     C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
                     C(RVALUE_MARK_BITMAP(obj), "M"),
                     C(RVALUE_PIN_BITMAP(obj), "P"),
                     C(RVALUE_MARKING_BITMAP(obj), "R"),
                     C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
                     C(rb_objspace_garbage_object_p(obj), "G"),
                     obj_type_name(obj)));

            APPENDF((BUFF_ARGS, "%p [%dXXXX] %s",
                     obj_type_name(obj)));

        if (internal_object_p(obj)) {

        else if (RBASIC(obj)->klass == 0) {
            APPENDF((BUFF_ARGS, "(temporary internal)"));

            if (!NIL_P(class_path)) {
                APPENDF((BUFF_ARGS, "(%s)", RSTRING_PTR(class_path)));

        APPENDF((BUFF_ARGS, "@%s:%d", RANY(obj)->file, RANY(obj)->line));
        UNEXPECTED_NODE(rb_raw_obj_info);

            APPENDF((BUFF_ARGS, "shared -> %s",
                     rb_obj_info(RARRAY(obj)->as.heap.aux.shared_root)));

        else if (FL_TEST(obj, RARRAY_EMBED_FLAG)) {
            APPENDF((BUFF_ARGS, "[%s%s] len: %ld (embed)",
                     C(ARY_EMBED_P(obj), "E"),
                     C(ARY_SHARED_P(obj), "S"),

            APPENDF((BUFF_ARGS, "[%s%s%s] len: %ld, capa:%ld ptr:%p",
                     C(ARY_EMBED_P(obj), "E"),
                     C(ARY_SHARED_P(obj), "S"),
                     ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,

        if (STR_SHARED_P(obj)) APPENDF((BUFF_ARGS, " [shared] "));
        APPENDF((BUFF_ARGS, "%.*s", str_len_no_raise(obj), RSTRING_PTR(obj)));

        VALUE fstr = RSYMBOL(obj)->fstr;
        ID id = RSYMBOL(obj)->id;

            APPENDF((BUFF_ARGS, ":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id));

            APPENDF((BUFF_ARGS, "(%p) id:%d", (void *)fstr, (unsigned int)id));

        APPENDF((BUFF_ARGS, "[%c%c] %"PRIdSIZE,
                 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                 RHASH_TRANSIENT_P(obj) ? 'T' : ' ',

            if (!NIL_P(class_path)) {
                APPENDF((BUFF_ARGS, "%s", RSTRING_PTR(class_path)));

                APPENDF((BUFF_ARGS, "(anon)"));

            if (!NIL_P(class_path)) {
                APPENDF((BUFF_ARGS, "src:%s", RSTRING_PTR(class_path)));

        if (RANY(obj)->as.basic.flags & ROBJECT_EMBED) {
            APPENDF((BUFF_ARGS, "(embed) len:%d", len));

            APPENDF((BUFF_ARGS, "len:%d ptr:%p", len, (void *)ptr));
            (block = vm_proc_block(obj)) != NULL &&
            (vm_block_type(block) == block_type_iseq) &&
            (iseq = vm_block_iseq(block)) != NULL) {
            rb_raw_iseq_info(BUFF_ARGS, iseq);

        else if (rb_ractor_p(obj)) {
                APPENDF((BUFF_ARGS, "r:%d", r->pub.id));

            const char *const type_name = rb_objspace_data_type_name(obj);
                APPENDF((BUFF_ARGS, "%s", type_name));

        APPENDF((BUFF_ARGS, "<%s> ", rb_imemo_name(imemo_type(obj))));

        switch (imemo_type(obj)) {

            APPENDF((BUFF_ARGS, ":%s (%s%s%s%s) type:%s alias:%d owner:%p defined_class:%p",
                     METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC  ? "pub" :
                     METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                     METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                     METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                     METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                     me->def ? rb_method_type_name(me->def->type) : "NULL",
                     me->def ? me->def->alias_count : -1,
                     (void *)me->defined_class));

                switch (me->def->type) {
                  case VM_METHOD_TYPE_ISEQ:
                    APPENDF((BUFF_ARGS, " (iseq:%s)", obj_info((VALUE)me->def->body.iseq.iseqptr)));

            rb_raw_iseq_info(BUFF_ARGS, iseq);

          case imemo_callinfo:
                APPENDF((BUFF_ARGS, "(mid:%s, flag:%x argc:%d, kwarg:%s)",
                         vm_ci_kwarg(ci) ? "available" : "NULL"));

          case imemo_callcache:
                APPENDF((BUFF_ARGS, "(klass:%s cme:%s%s (%p) call:%p",
                         NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                         cme ? rb_id2name(cme->called_id) : "<NULL>",
                         cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                         (void *)vm_cc_call(cc)));

    asan_poison_object(obj);
#define OBJ_INFO_BUFFERS_NUM 10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static int obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

    const int index = obj_info_buffers_index++;
    char *const buff = &obj_info_buffers[index][0];

    if (obj_info_buffers_index >= OBJ_INFO_BUFFERS_NUM) {
        obj_info_buffers_index = 0;

    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);

    return obj_type_name(obj);

MJIT_FUNC_EXPORTED const char *
rb_obj_info(VALUE obj)

    return obj_info(obj);

rb_obj_info_dump(VALUE obj)

    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));

MJIT_FUNC_EXPORTED void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)

    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
rb_gcdebug_print_obj_condition(VALUE obj)

    fprintf(stderr, "created at: %s:%d\n", RANY(obj)->file, RANY(obj)->line);

        fprintf(stderr, "moved?: true\n");

        fprintf(stderr, "moved?: false\n");

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        fprintf(stderr, "pointer to heap?: true\n");

        fprintf(stderr, "pointer to heap?: false\n");

    fprintf(stderr, "marked? : %s\n", MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "pinned? : %s\n", MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj) ? "true" : "false");
    fprintf(stderr, "age? : %d\n", RVALUE_AGE(obj));
    fprintf(stderr, "old? : %s\n", RVALUE_OLD_P(obj) ? "true" : "false");
    fprintf(stderr, "WB-protected?: %s\n", RVALUE_WB_UNPROTECTED(obj) ? "false" : "true");
    fprintf(stderr, "remembered? : %s\n", RVALUE_REMEMBERED(obj) ? "true" : "false");

    if (is_lazy_sweeping(objspace)) {
        fprintf(stderr, "lazy sweeping?: true\n");
        fprintf(stderr, "swept?: %s\n", is_swept_object(objspace, obj) ? "done" : "not yet");

        fprintf(stderr, "lazy sweeping?: false\n");

    fprintf(stderr, "WARNING: object %s(%p) is inadvertently collected\n", (char *)name, (void *)obj);

rb_gcdebug_sentinel(VALUE obj, const char *name)
#if GC_DEBUG_STRESS_TO_CLASS

rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)

    if (!stress_to_class) {

rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)

    if (stress_to_class) {
        for (i = 0; i < argc; ++i) {
            rb_ary_delete_same(stress_to_class, argv[i]);

        stress_to_class = 0;

    VALUE rb_mObjSpace;
    VALUE rb_mProfiler;
    VALUE gc_constants;

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
#if MALLOC_ALLOCATED_SIZE

#if GC_DEBUG_STRESS_TO_CLASS

#define OPT(o) if (o) rb_ary_push(opts, rb_fstring_lit(#o))

    OPT(RGENGC_CHECK_MODE);
    OPT(RGENGC_PROFILE);
    OPT(RGENGC_ESTIMATE_OLDMALLOC);
    OPT(GC_PROFILE_MORE_DETAIL);
    OPT(GC_ENABLE_LAZY_SWEEP);
    OPT(CALC_EXACT_MALLOC_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE_CHECK);
    OPT(GC_PROFILE_DETAIL_MEMORY);

#ifdef ruby_xmalloc2
#undef ruby_xmalloc2

#ifdef ruby_xrealloc
#undef ruby_xrealloc

#ifdef ruby_xrealloc2
#undef ruby_xrealloc2

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

    return ruby_xmalloc_body(size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

    return ruby_xmalloc2_body(n, size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

    return ruby_xcalloc_body(n, size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

    return ruby_xrealloc_body(ptr, new_size);

#if USE_GC_MALLOC_OBJ_INFO_DETAILS
    ruby_malloc_info_file = __FILE__;
    ruby_malloc_info_line = __LINE__;

    return ruby_xrealloc2_body(ptr, n, new_size);
#define RUBY_ASSERT_ALWAYS(expr)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
#define RUBY_ALIGNOF
Wraps (or simulates) alignof.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
int rb_postponed_job_register_one(unsigned int flags, rb_postponed_job_func_t func, void *data)
Identical to rb_postponed_job_register(), except it additionally checks for duplicated registrati...
#define RB_GNUC_EXTENSION
This is expanded to nothing for non-GCC compilers.
#define RUBY_INTERNAL_EVENT_GC_EXIT
gc_exit() is called.
#define RUBY_INTERNAL_EVENT_GC_ENTER
gc_enter() is called.
#define RUBY_INTERNAL_EVENT_GC_END_SWEEP
GC ended sweep phase.
#define RUBY_INTERNAL_EVENT_GC_END_MARK
GC ended mark phase.
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
Bitmask of GC events.
#define RUBY_INTERNAL_EVENT_FREEOBJ
Object swept.
#define RUBY_INTERNAL_EVENT_GC_START
GC started.
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
void rb_include_module(VALUE klass, VALUE module)
Includes a module to a class.
VALUE rb_define_class_under(VALUE outer, const char *name, VALUE super)
Defines a class under the namespace of outer.
VALUE rb_define_module(const char *name)
Defines a top-level module.
VALUE rb_define_module_under(VALUE outer, const char *name)
Defines a module under the namespace of outer.
void rb_define_module_function(VALUE module, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a module function for a module.
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
void rb_define_method(VALUE klass, const char *name, VALUE(*func)(ANYARGS), int argc)
Defines a method.
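As an illustration of the two entries above, here is a minimal sketch of defining a variadic C method and parsing its arguments with rb_scan_args(); the class Greeter and the method greet are hypothetical names, not code from this file.

#include "ruby.h"

/* hypothetical: "11" means one required argument plus one optional argument */
static VALUE
greeter_greet(int argc, VALUE *argv, VALUE self)
{
    VALUE name, punct;

    rb_scan_args(argc, argv, "11", &name, &punct);
    if (NIL_P(punct)) punct = rb_str_new_cstr("!");

    VALUE out = rb_str_new_cstr("Hello, ");
    rb_str_append(out, name);
    rb_str_append(out, punct);
    return out;
}

void
Init_greeter(void)
{
    VALUE cGreeter = rb_define_class("Greeter", rb_cObject);
    /* arity -1: Ruby passes argc/argv straight to the C function */
    rb_define_method(cGreeter, "greet", greeter_greet, -1);
}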
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
#define TYPE(_)
Old name of rb_type.
#define FL_SINGLETON
Old name of RUBY_FL_SINGLETON.
#define T_FILE
Old name of RUBY_T_FILE.
#define FL_EXIVAR
Old name of RUBY_FL_EXIVAR.
#define ALLOC
Old name of RB_ALLOC.
#define T_STRING
Old name of RUBY_T_STRING.
#define xfree
Old name of ruby_xfree.
#define T_MASK
Old name of RUBY_T_MASK.
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
#define T_NIL
Old name of RUBY_T_NIL.
#define UNREACHABLE
Old name of RBIMPL_UNREACHABLE.
#define T_FLOAT
Old name of RUBY_T_FLOAT.
#define T_IMEMO
Old name of RUBY_T_IMEMO.
#define ID2SYM
Old name of RB_ID2SYM.
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
#define ULONG2NUM
Old name of RB_ULONG2NUM.
#define ELTS_SHARED
Old name of RUBY_ELTS_SHARED.
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
#define SYM2ID
Old name of RB_SYM2ID.
#define T_DATA
Old name of RUBY_T_DATA.
#define FL_SEEN_OBJ_ID
Old name of RUBY_FL_SEEN_OBJ_ID.
#define FL_PROMOTED0
Old name of RUBY_FL_PROMOTED0.
#define FIXNUM_FLAG
Old name of RUBY_FIXNUM_FLAG.
#define LL2NUM
Old name of RB_LL2NUM.
#define CLASS_OF
Old name of rb_class_of.
#define T_NONE
Old name of RUBY_T_NONE.
#define T_NODE
Old name of RUBY_T_NODE.
#define SIZET2NUM
Old name of RB_SIZE2NUM.
#define xmalloc
Old name of ruby_xmalloc.
#define LONG2FIX
Old name of RB_INT2FIX.
#define FIX2INT
Old name of RB_FIX2INT.
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
#define T_MODULE
Old name of RUBY_T_MODULE.
#define STATIC_SYM_P
Old name of RB_STATIC_SYM_P.
#define T_TRUE
Old name of RUBY_T_TRUE.
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define T_HASH
Old name of RUBY_T_HASH.
#define ALLOC_N
Old name of RB_ALLOC_N.
#define FL_ABLE
Old name of RB_FL_ABLE.
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
#define FL_SET
Old name of RB_FL_SET.
#define rb_ary_new3
Old name of rb_ary_new_from_args.
#define LONG2NUM
Old name of RB_LONG2NUM.
#define T_FALSE
Old name of RUBY_T_FALSE.
#define ULL2NUM
Old name of RB_ULL2NUM.
#define T_UNDEF
Old name of RUBY_T_UNDEF.
#define FLONUM_P
Old name of RB_FLONUM_P.
#define Qtrue
Old name of RUBY_Qtrue.
#define DYNAMIC_SYM_P
Old name of RB_DYNAMIC_SYM_P.
#define T_ZOMBIE
Old name of RUBY_T_ZOMBIE.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
#define T_OBJECT
Old name of RUBY_T_OBJECT.
#define NIL_P
Old name of RB_NIL_P.
#define FL_WB_PROTECTED
Old name of RUBY_FL_WB_PROTECTED.
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
#define DBL2NUM
Old name of rb_float_new.
#define T_MATCH
Old name of RUBY_T_MATCH.
#define T_CLASS
Old name of RUBY_T_CLASS.
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
#define OBJ_PROMOTED
Old name of RB_OBJ_PROMOTED.
#define T_MOVED
Old name of RUBY_T_MOVED.
#define FL_TEST
Old name of RB_FL_TEST.
#define FL_PROMOTED1
Old name of RUBY_FL_PROMOTED1.
#define xcalloc
Old name of ruby_xcalloc.
#define FL_UNSET
Old name of RB_FL_UNSET.
#define UINT2NUM
Old name of RB_UINT2NUM.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define FL_USHIFT
Old name of RUBY_FL_USHIFT.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
#define T_REGEXP
Old name of RUBY_T_REGEXP.
size_t ruby_stack_length(VALUE **p)
Queries what Ruby thinks is the machine stack.
int ruby_stack_check(void)
Checks for stack overflow.
void rb_raise(VALUE exc, const char *fmt,...)
Exception entry point.
void rb_bug(const char *fmt,...)
Interpreter panic switch.
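A short hedged sketch of rb_raise() usage (the check_positive helper is hypothetical): the format string is handled printf-style and control never returns to the caller.

#include "ruby.h"

static void
check_positive(long n)
{
    if (n <= 0) {
        /* raises ArgumentError; does not return */
        rb_raise(rb_eArgError, "expected a positive value, got %ld", n);
    }
}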
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports always regardless of runtime -W flag.
VALUE rb_ensure(VALUE(*b_proc)(VALUE), VALUE data1, VALUE(*e_proc)(VALUE), VALUE data2)
An equivalent to ensure clause.
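rb_ensure() pairs a body callback with a cleanup callback that runs even if the body raises, much like begin/ensure. A minimal sketch, assuming a hypothetical IO-writing helper:

#include "ruby.h"

static VALUE
with_io_body(VALUE io)
{
    return rb_io_write(io, rb_str_new_cstr("scratch data\n"));
}

static VALUE
with_io_ensure(VALUE io)
{
    /* always runs, even if with_io_body raised */
    return rb_funcall(io, rb_intern("close"), 0);
}

static VALUE
with_io(VALUE io)
{
    return rb_ensure(with_io_body, io, with_io_ensure, io);
}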
VALUE rb_mEnumerable
Enumerable module.
VALUE rb_stdout
STDOUT constant.
void rb_gc_register_address(VALUE *valptr)
Inform the garbage collector that valptr points to a live Ruby object that should not be moved.
void rb_gc_unregister_address(VALUE *valptr)
Inform the garbage collector that a pointer previously passed to rb_gc_register_address() no longer p...
void rb_global_variable(VALUE *)
An alias for rb_gc_register_address().
void rb_gc_register_mark_object(VALUE object)
Inform the garbage collector that object is a live Ruby object that should not be moved.
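The registration calls above are what keep C-level static VALUEs alive; a minimal sketch (cached_config and Init_my_extension are hypothetical names):

#include "ruby.h"

static VALUE cached_config = Qnil;

void
Init_my_extension(void)
{
    cached_config = rb_hash_new();
    /* rb_global_variable() is an alias of rb_gc_register_address(): the GC
     * now treats cached_config as a root and will not collect or move it. */
    rb_global_variable(&cached_config);
}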
Defines RBIMPL_HAS_BUILTIN.
void rb_ary_free(VALUE ary)
Destroys the given array for no reason.
VALUE rb_ary_cat(VALUE ary, const VALUE *train, long len)
Destructively appends multiple elements at the end of the array.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_tmp_new(long capa)
Allocates a "temporary" array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_big_eql(VALUE lhs, VALUE rhs)
Equality, in terms of eql?.
VALUE rb_obj_is_fiber(VALUE obj)
Queries if an object is a fiber.
#define RETURN_ENUMERATOR(obj, argc, argv)
Identical to RETURN_SIZED_ENUMERATOR(), except its size is unknown.
#define rb_check_frozen
Just another name of rb_check_frozen.
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
void rb_gc_mark(VALUE obj)
Marks an object.
void rb_mark_tbl_no_pin(struct st_table *tbl)
Identical to rb_mark_tbl(), except it marks objects using rb_gc_mark_movable().
void rb_memerror(void)
Triggers out-of-memory error.
size_t rb_gc_stat(VALUE key_or_buf)
Obtains various GC related profiles.
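rb_gc_stat() accepts either a Hash to fill or a Symbol key; with a Symbol it returns the single counter directly as a size_t. A sketch using the standard :count key (gc_count_since_boot is a hypothetical wrapper):

#include "ruby.h"

static VALUE
gc_count_since_boot(VALUE self)
{
    size_t runs = rb_gc_stat(ID2SYM(rb_intern("count")));
    return SIZET2NUM(runs);
}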
void rb_gc_mark_movable(VALUE obj)
Maybe this is the only function provided for C extensions to control the pinning of objects,...
VALUE rb_gc_disable(void)
Disables GC.
VALUE rb_gc_start(void)
Identical to rb_gc(), except the return value.
VALUE rb_gc_latest_gc_info(VALUE key_or_buf)
Obtains various info regarding the most recent GC run.
void rb_mark_tbl(struct st_table *tbl)
Identical to rb_mark_hash(), except it marks only values of the table and leave their associated keys...
VALUE rb_gc_enable(void)
(Re-) enables GC.
void rb_mark_hash(struct st_table *tbl)
Marks keys and values associated inside of the given table.
VALUE rb_undefine_finalizer(VALUE obj)
Modifies the object so that it has no finalisers at all.
int rb_during_gc(void)
Queries if the GC is busy.
void rb_gc_mark_maybe(VALUE obj)
Identical to rb_gc_mark(), except it allows the passed value be a non-object.
VALUE rb_gc_location(VALUE obj)
Finds a new "location" of an object.
void rb_gc_mark_locations(const VALUE *start, const VALUE *end)
Marks objects between the two pointers.
void rb_gc(void)
Triggers a GC process.
void rb_gc_force_recycle(VALUE obj)
Asserts that the passed object is no longer needed.
void rb_gc_update_tbl_refs(st_table *ptr)
Updates references inside of tables.
void rb_mark_set(struct st_table *tbl)
Identical to rb_mark_hash(), except it marks only keys of the table and leave their associated values...
VALUE rb_define_finalizer(VALUE obj, VALUE block)
Assigns a finaliser for an object.
void rb_gc_copy_finalizer(VALUE dst, VALUE src)
Copy&paste an object's finaliser to another.
void rb_gc_adjust_memory_usage(ssize_t diff)
Informs that there are external memory usages.
size_t rb_gc_count(void)
Identical to rb_gc_stat(), with "count" parameter.
VALUE rb_hash_aset(VALUE hash, VALUE key, VALUE val)
Inserts or replaces ("upsert"s) the objects into the given hash table.
VALUE rb_hash_new(void)
Creates a new, empty hash object.
VALUE rb_io_write(VALUE io, VALUE str)
Writes the given string to the given IO.
VALUE rb_obj_id(VALUE obj)
Finds or creates an integer primary key of the given object.
VALUE rb_memory_id(VALUE obj)
Identical to rb_obj_id(), except it hesitates from allocating a new instance of rb_cInteger.
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
VALUE rb_proc_new(rb_block_call_func_t func, VALUE callback_arg)
This is an rb_iterate() + rb_block_proc() combo.
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
void rb_str_free(VALUE str)
Destroys the given string for no reason.
VALUE rb_str_append(VALUE dst, VALUE src)
Identical to rb_str_buf_append(), except it converts the right hand side before concatenating.
VALUE rb_str_cat2(VALUE, const char *)
Just another name of rb_str_cat_cstr.
VALUE rb_str_buf_append(VALUE dst, VALUE src)
Identical to rb_str_cat_cstr(), except it takes Ruby's string instead of C's.
VALUE rb_str_new_cstr(const char *ptr)
Identical to rb_str_new(), except it assumes the passed pointer is a pointer to a C string.
VALUE rb_str_buf_new(long capa)
Allocates a "string buffer".
VALUE rb_class_path_cached(VALUE mod)
Just another name of rb_mod_name.
VALUE rb_class_name(VALUE obj)
Queries the name of the given object's class.
void rb_free_generic_ivar(VALUE obj)
Frees the list of instance variables.
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_obj_respond_to(VALUE obj, ID mid, int private_p)
Identical to rb_respond_to(), except it additionally takes the visibility parameter.
ID rb_intern(const char *name)
Finds or creates a symbol of the given name.
VALUE rb_sym2str(VALUE id)
Identical to rb_id2str(), except it takes an instance of rb_cSymbol rather than an ID.
const char * rb_id2name(ID id)
Retrieves the name mapped to the given id.
VALUE rb_id2str(ID id)
Identical to rb_id2name(), except it returns a Ruby's String instead of C's.
void rb_define_const(VALUE klass, const char *name, VALUE val)
Defines a Ruby level constant under a namespace.
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
#define strtod(s, e)
Just another name of ruby_strtod.
void ruby_qsort(void *, const size_t, const size_t, int(*)(const void *, const void *, void *), void *)
Reentrant implementation of quick sort.
#define strdup(s)
Just another name of ruby_strdup.
VALUE rb_sprintf(const char *fmt,...)
Ruby's extended sprintf(3).
VALUE rb_str_catf(VALUE dst, const char *fmt,...)
Identical to rb_sprintf(), except it renders the output to the specified object rather than creating ...
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
VALUE rb_yield_values(int n,...)
Identical to rb_yield(), except it takes variadic number of parameters and pass them to the block.
VALUE rb_yield(VALUE val)
Yields the block.
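A short sketch combining RETURN_ENUMERATOR (listed earlier) with rb_yield_values(): when no block is given the method returns an Enumerator, otherwise it yields index/square pairs; each_pair is a hypothetical method body.

#include "ruby.h"

static VALUE
each_pair(VALUE self)
{
    RETURN_ENUMERATOR(self, 0, NULL);
    for (long i = 0; i < 3; i++) {
        rb_yield_values(2, LONG2NUM(i), LONG2NUM(i * i));
    }
    return self;
}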
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]]
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
#define MEMMOVE(p1, p2, type, n)
Handy macro to call memmove.
VALUE type(ANYARGS)
ANYARGS-ed function type.
int st_foreach(st_table *q, int_type *w, st_data_t e)
Iteration over the given table.
VALUE rb_newobj(void)
This is the implementation detail of RB_NEWOBJ.
VALUE rb_newobj_of(VALUE klass, VALUE flags)
This is the implementation detail of RB_NEWOBJ_OF.
#define PRI_PIDT_PREFIX
A rb_sprintf() format prefix to be used for a pid_t parameter.
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
#define RARRAY_LEN
Just another name of rb_array_len.
#define RARRAY_CONST_PTR_TRANSIENT
Just another name of rb_array_const_ptr_transient.
#define RARRAY(obj)
Convenient casting macro.
static bool RARRAY_TRANSIENT_P(VALUE ary)
Queries if the array is a transient array.
#define RARRAY_AREF(a, i)
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
#define RBASIC(obj)
Convenient casting macro.
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
#define RCLASS(obj)
Convenient casting macro.
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
This is the primitive way to wrap an existing C struct into RData.
#define DATA_PTR(obj)
Convenient getter macro.
VALUE rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Identical to rb_data_object_wrap(), except it allocates a new data region internally instead of takin...
#define RDATA(obj)
Convenient casting macro.
#define RUBY_DEFAULT_FREE
This is a value you can set to RData::dfree.
void(* RUBY_DATA_FUNC)(void *)
This is the type of callbacks registered to RData.
#define RFILE(obj)
Convenient casting macro.
void rb_gc_writebarrier(VALUE old, VALUE young)
This is the implementation of RB_OBJ_WRITE().
void rb_gc_writebarrier_unprotect(VALUE obj)
This is the implementation of RB_OBJ_WB_UNPROTECT().
#define RGENGC_WB_PROTECTED_OBJECT
This is a compile-time flag to enable/disable write barrier for struct RObject.
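rb_gc_writebarrier() is normally reached through RB_OBJ_WRITE(): whenever a wrapped C struct stores a reference to another Ruby object, assigning through RB_OBJ_WRITE keeps the generational old-to-young invariant. A sketch with a hypothetical struct:

#include "ruby.h"

struct node {
    VALUE payload;    /* Ruby object referenced by this wrapper */
};

static void
node_set_payload(VALUE self, struct node *n, VALUE payload)
{
    /* a plain `n->payload = payload;` would bypass the write barrier */
    RB_OBJ_WRITE(self, &n->payload, payload);
}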
#define RHASH_SIZE(h)
Queries the size of the hash.
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
#define RMATCH(obj)
Convenient casting macro.
@ ROBJECT_EMBED_LEN_MAX
Max possible number of instance variables that can be embedded.
static uint32_t ROBJECT_NUMIV(VALUE obj)
Queries the number of instance variables.
static VALUE * ROBJECT_IVPTR(VALUE obj)
Queries the instance variables.
#define RREGEXP_PTR(obj)
Convenient accessor macro.
static long RSTRING_LEN(VALUE str)
Queries the length of the string.
#define RSTRING(obj)
Convenient casting macro.
static char * RSTRING_PTR(VALUE str)
Queries the contents pointer of the string.
static long RSTRUCT_LEN(VALUE st)
Returns the number of struct members.
static bool RTYPEDDATA_P(VALUE obj)
Checks whether the passed object is RTypedData or RData.
#define RTYPEDDATA_DATA(v)
Convenient getter macro.
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
This is the primitive way to wrap an existing C struct into RTypedData.
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
Identical to rb_data_typed_object_wrap(), except it allocates a new data region internally instead of...
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
static const struct rb_data_type_struct * RTYPEDDATA_TYPE(VALUE obj)
Queries for the type of given object.
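Putting the RTypedData entries together, a minimal sketch of wrapping a C struct: dmark marks the embedded reference with rb_gc_mark_movable() and dcompact re-fetches its post-compaction address with rb_gc_location(). Every name here (struct counter, counter_type, counter_alloc) is hypothetical.

#include "ruby.h"

struct counter {
    VALUE label;   /* Ruby String owned by this wrapper */
    long count;
};

static void
counter_mark(void *ptr)
{
    struct counter *c = ptr;
    rb_gc_mark_movable(c->label);          /* allow the GC to move it */
}

static void
counter_compact(void *ptr)
{
    struct counter *c = ptr;
    c->label = rb_gc_location(c->label);   /* pick up the new address */
}

static const rb_data_type_t counter_type = {
    .wrap_struct_name = "counter",
    .function = {
        .dmark = counter_mark,
        .dfree = RUBY_TYPED_DEFAULT_FREE,
        .dsize = NULL,
        .dcompact = counter_compact,
    },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
counter_alloc(VALUE klass)
{
    struct counter *c;
    VALUE obj = TypedData_Make_Struct(klass, struct counter, &counter_type, c);
    c->label = rb_str_new_cstr("unnamed");
    c->count = 0;
    return obj;
}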
const char * rb_obj_classname(VALUE obj)
Queries the name of the class of the passed object.
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
static VALUE rb_special_const_p(VALUE obj)
Identical to RB_SPECIAL_CONST_P, except it returns a VALUE.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
const VALUE shared_root
Parent of the array.
Ruby's object's, base components.
const VALUE klass
Class of an object.
VALUE flags
Per-object flags.
Internal header for Complex.
void * data
Pointer to the actual C level struct that you want to wrap.
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
struct rb_io_t * fptr
IO's specific fields.
Regular expression execution context.
VALUE regexp
The expression of this match.
VALUE str
The target string that the match was made against.
Internal header for Rational.
Ruby's regular expression.
const VALUE src
Source code of this expression.
VALUE shared
Parent of the string.
char * ptr
Pointer to the contents of the string.
const rb_data_type_t * type
This field stores various information about how Ruby should handle a data.
This is the struct that holds necessary info for a struct.
RUBY_DATA_FUNC dcompact
This function is called when the object is relocated.
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
const char * wrap_struct_name
Name of structs of this kind.
VALUE ecopts
Flags as Ruby hash.
Ruby's IO, metadata and buffers.
struct rb_io_t::rb_io_enc_t encs
Decomposed encoding flags.
VALUE writeconv_asciicompat
This is, when set, an instance of rb_cString which holds the "common" encoding.
VALUE pathv
pathname for file
VALUE write_lock
This is a Ruby level mutex.
VALUE self
The IO's Ruby level counterpart.
VALUE writeconv_pre_ecopts
Value of rb_io_t::rb_io_enc_t::ecopts stored right before initialising rb_io_t::writeconv.
VALUE tied_io_for_writing
Duplex IO object, if set.
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
rb_cref_t * cref
class reference, should be marked
Internal header for Class.
This is an implementation detail of rbimpl_size_mul_overflow().
size_t right
Multiplication result.
Represents the region of a capture group.
int char_offset_num_allocated
Number of rmatch_offset that rmatch::char_offset holds.
struct rmatch_offset * char_offset
Capture group offsets, in C array.
struct re_registers regs
"Registers" of a match.
IFUNC (Internal FUNCtion)
intptr_t SIGNED_VALUE
A signed integer type that has the same width with VALUE.
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
@ RUBY_T_MASK
Bitmask of ruby_value_type.
void * ruby_xrealloc(void *ptr, size_t newsiz)
Resize the storage instance.
void * ruby_xrealloc2(void *ptr, size_t newelems, size_t newsiz)
Identical to ruby_xrealloc(), except it resizes the given storage instance to newelems * newsiz bytes...
void ruby_xfree(void *ptr)
Deallocates a storage instance.
void * ruby_xmalloc2(size_t nelems, size_t elemsiz)
Identical to ruby_xmalloc(), except it allocates nelems * elemsiz bytes.
void * ruby_xmalloc(size_t size)
Allocates a storage instance.
void * ruby_xcalloc(size_t nelems, size_t elemsiz)
Identical to ruby_xmalloc2(), except it returns a zero-filled storage instance.
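The ruby_x* allocators raise NoMemoryError rather than returning NULL, and the two-argument forms check the nelems * elemsiz multiplication for overflow (see size_mul_or_raise earlier in this file). A small hypothetical helper for illustration:

#include "ruby.h"

static VALUE
sum_of_squares(long n)
{
    /* overflow-checked n * sizeof(long); raises on failure, never NULL */
    long *tmp = ruby_xcalloc(n, sizeof(long));
    long sum = 0;

    for (long i = 0; i < n; i++) {
        tmp[i] = i * i;
        sum += tmp[i];
    }
    ruby_xfree(tmp);
    return LONG2NUM(sum);
}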