Ruby 3.1.3p185 (2022-11-24 revision 1a6b16756e0ba6b95ab71a441357ed5484e33498)
vm_sync.c
1#include "vm_core.h"
2#include "vm_sync.h"
3#include "ractor_core.h"
4#include "vm_debug.h"
5#include "gc.h"
6
7static bool vm_barrier_finish_p(rb_vm_t *vm);
8
9static bool
10vm_locked(rb_vm_t *vm)
11{
12 return vm->ractor.sync.lock_owner == GET_RACTOR();
13}
14
#if RUBY_DEBUG > 0
/* Debug-only check: abort unless the current ractor holds the VM lock.
 * Only meaningful once multiple ractors exist; a single-ractor VM is
 * trivially exempt. */
void
RUBY_ASSERT_vm_locking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    rb_vm_t *vm = GET_VM();
    VM_ASSERT(vm_locked(vm));
}

/* Debug-only check: abort if the current ractor holds the VM lock.
 * Mirror image of RUBY_ASSERT_vm_locking(). */
void
RUBY_ASSERT_vm_unlocking(void)
{
    if (!rb_multi_ractor_p()) {
        return;
    }
    rb_vm_t *vm = GET_VM();
    VM_ASSERT(!vm_locked(vm));
}
#endif
34
35bool
36rb_vm_locked_p(void)
37{
38 return vm_locked(GET_VM());
39}
40
// Acquire the VM-wide lock for ractor `cr`, or bump the recursion count
// when the caller already holds it (`locked == true`).
//
//   cr         - the acquiring ractor (unused when `locked` is true).
//   vm         - the VM whose ractor.sync lock is taken.
//   locked     - caller already owns the lock; only increment lock_rec.
//   no_barrier - skip barrier participation (used by the _nb entry point).
//   lev        - out: the recursion level after this acquisition.
//
// While waiting out an in-progress barrier, this function temporarily
// gives up logical ownership (lock_owner = NULL) around the condvar wait
// so the barrier owner can proceed; the native mutex itself is re-held on
// every wakeup by rb_native_cond_wait.
static void
vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
    RUBY_DEBUG_LOG2(file, line, "start locked:%d", locked);

    if (locked) {
        // Recursive acquisition: just sanity-check ownership; lock_rec is
        // incremented at the bottom.
        ASSERT_vm_locking();
    }
    else {
#if RACTOR_CHECK_MODE
        // locking ractor and acquire VM lock will cause deadlock
        VM_ASSERT(cr->sync.locked_by != rb_ractor_self(cr));
#endif

        // lock
        rb_native_mutex_lock(&vm->ractor.sync.lock);
        VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
        vm->ractor.sync.lock_owner = cr;

        if (!no_barrier) {
            // barrier: if a barrier is pending, this ractor must report
            // itself blocking and wait until the barrier cycle completes.
            while (vm->ractor.sync.barrier_waiting) {
                // Snapshot the cycle counter; a change means this barrier
                // round finished and we were restarted.
                unsigned int barrier_cnt = vm->ractor.sync.barrier_cnt;
                rb_thread_t *th = GET_THREAD();
                bool running;

                // Save machine registers so the GC (a typical barrier user)
                // can scan this thread's stack while it waits.
                RB_GC_SAVE_MACHINE_CONTEXT(th);

                if (rb_ractor_status_p(cr, ractor_running)) {
                    rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);
                    running = true;
                }
                else {
                    running = false;
                }
                VM_ASSERT(rb_ractor_status_p(cr, ractor_blocking));

                if (vm_barrier_finish_p(vm)) {
                    // We were the last straggler: wake the barrier owner.
                    RUBY_DEBUG_LOG("wakeup barrier owner");
                    rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
                }
                else {
                    RUBY_DEBUG_LOG("wait for barrier finish");
                }

                // wait for restart: drop logical ownership around the wait
                // so the barrier owner (which holds the mutex via
                // rb_vm_cond_wait) can make progress, then retake it.
                while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
                    vm->ractor.sync.lock_owner = NULL;
                    rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
                    VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
                    vm->ractor.sync.lock_owner = cr;
                }

                RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

                if (running) {
                    // Undo the blocking transition made above.
                    rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);
                }
            }
        }

        VM_ASSERT(vm->ractor.sync.lock_rec == 0);
        VM_ASSERT(vm->ractor.sync.lock_owner == cr);
    }

    vm->ractor.sync.lock_rec++;
    *lev = vm->ractor.sync.lock_rec;

    RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%d", vm->ractor.sync.lock_rec, rb_ractor_id(vm->ractor.sync.lock_owner));
}
111
112static void
113vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
114{
115 RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%d", vm->ractor.sync.lock_rec, rb_ractor_id(vm->ractor.sync.lock_owner));
116
117 ASSERT_vm_locking();
118 VM_ASSERT(vm->ractor.sync.lock_rec > 0);
119 VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
120
121 vm->ractor.sync.lock_rec--;
122 *lev = vm->ractor.sync.lock_rec;
123
124 if (vm->ractor.sync.lock_rec == 0) {
125 vm->ractor.sync.lock_owner = NULL;
126 rb_native_mutex_unlock(&vm->ractor.sync.lock);
127 }
128}
129
130MJIT_FUNC_EXPORTED void
131rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS)
132{
133 rb_vm_t *vm = GET_VM();
134 if (vm_locked(vm)) {
135 vm_lock_enter(NULL, vm, true, false, lev APPEND_LOCATION_PARAMS);
136 }
137 else {
138 vm_lock_enter(GET_RACTOR(), vm, false, false, lev APPEND_LOCATION_PARAMS);
139 }
140}
141
142MJIT_FUNC_EXPORTED void
143rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
144{
145 rb_vm_t *vm = GET_VM();
146 if (vm_locked(vm)) {
147 vm_lock_enter(NULL, vm, true, true, lev APPEND_LOCATION_PARAMS);
148 }
149 else {
150 vm_lock_enter(GET_RACTOR(), vm, false, true, lev APPEND_LOCATION_PARAMS);
151 }
152}
153
154MJIT_FUNC_EXPORTED void
155rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS)
156{
157 rb_vm_t *vm = GET_VM();
158 vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
159}
160
161MJIT_FUNC_EXPORTED void
162rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
163{
164 vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
165}
166
167void
168rb_vm_lock_body(LOCATION_ARGS)
169{
170 rb_vm_t *vm = GET_VM();
171 ASSERT_vm_unlocking();
172
173 vm_lock_enter(GET_RACTOR(), vm, false, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
174}
175
176void
177rb_vm_unlock_body(LOCATION_ARGS)
178{
179 rb_vm_t *vm = GET_VM();
180 ASSERT_vm_locking();
181 VM_ASSERT(vm->ractor.sync.lock_rec == 1);
182 vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
183}
184
185static void
186vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
187{
188 ASSERT_vm_locking();
189 unsigned int lock_rec = vm->ractor.sync.lock_rec;
190 rb_ractor_t *cr = vm->ractor.sync.lock_owner;
191
192 vm->ractor.sync.lock_rec = 0;
193 vm->ractor.sync.lock_owner = NULL;
194 if (msec > 0) {
195 rb_native_cond_timedwait(cond, &vm->ractor.sync.lock, msec);
196 }
197 else {
198 rb_native_cond_wait(cond, &vm->ractor.sync.lock);
199 }
200 vm->ractor.sync.lock_rec = lock_rec;
201 vm->ractor.sync.lock_owner = cr;
202}
203
204void
205rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond)
206{
207 vm_cond_wait(vm, cond, 0);
208}
209
210void
211rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec)
212{
213 vm_cond_wait(vm, cond, msec);
214}
215
216static bool
217vm_barrier_finish_p(rb_vm_t *vm)
218{
219 RUBY_DEBUG_LOG("cnt:%u living:%u blocking:%u",
220 vm->ractor.sync.barrier_cnt,
221 vm->ractor.cnt,
222 vm->ractor.blocking_cnt);
223
224 VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
225 return vm->ractor.blocking_cnt == vm->ractor.cnt;
226}
227
// Stop-the-world barrier: block until every other ractor has reached a
// blocking state, run with exclusive access implied, then release them.
// Caller must hold the VM lock (non-recursively) and must be a running
// ractor. No-op when only one ractor exists.
void
rb_vm_barrier(void)
{
    RB_DEBUG_COUNTER_INC(vm_sync_barrier);

    if (!rb_multi_ractor_p()) {
        // no other ractors
        return;
    }
    else {
        rb_vm_t *vm = GET_VM();
        VM_ASSERT(vm->ractor.sync.barrier_waiting == false);
        ASSERT_vm_locking();

        rb_ractor_t *cr = vm->ractor.sync.lock_owner;
        VM_ASSERT(cr == GET_RACTOR());
        VM_ASSERT(rb_ractor_status_p(cr, ractor_running));

        // Announce the barrier: ractors acquiring the VM lock from now on
        // will park in vm_lock_enter's barrier loop.
        vm->ractor.sync.barrier_waiting = true;

        RUBY_DEBUG_LOG("barrier start. cnt:%u living:%u blocking:%u",
                       vm->ractor.sync.barrier_cnt,
                       vm->ractor.cnt,
                       vm->ractor.blocking_cnt);

        // Count ourselves as blocking so vm_barrier_finish_p can reach
        // blocking_cnt == cnt.
        rb_vm_ractor_blocking_cnt_inc(vm, cr, __FILE__, __LINE__);

        // send signal: interrupt every other ractor's running thread so it
        // checks in at the barrier.
        rb_ractor_t *r = 0;
        list_for_each(&vm->ractor.set, r, vmlr_node) {
            if (r != cr) {
                rb_ractor_vm_barrier_interrupt_running_thread(r);
            }
        }

        // wait: sleeps on barrier_cond; the last arriving ractor signals it
        // (see vm_lock_enter). rb_vm_cond_wait drops/restores lock ownership.
        while (!vm_barrier_finish_p(vm)) {
            rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
        }

        RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);

        rb_vm_ractor_blocking_cnt_dec(vm, cr, __FILE__, __LINE__);

        // End the cycle: bumping barrier_cnt is what releases waiters from
        // their "wait for restart" loop in vm_lock_enter.
        vm->ractor.sync.barrier_waiting = false;
        vm->ractor.sync.barrier_cnt++;

        list_for_each(&vm->ractor.set, r, vmlr_node) {
            rb_native_cond_signal(&r->barrier_wait_cond);
        }
    }
}
280
281void
282rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
283 unsigned int recorded_lock_rec,
284 unsigned int current_lock_rec)
285{
286 VM_ASSERT(recorded_lock_rec != current_lock_rec);
287
288 if (UNLIKELY(recorded_lock_rec > current_lock_rec)) {
289 rb_bug("unexpected situation - recordd:%u current:%u",
290 recorded_lock_rec, current_lock_rec);
291 }
292 else {
293 while (recorded_lock_rec < current_lock_rec) {
294 RB_VM_LOCK_LEAVE_LEV(&current_lock_rec);
295 }
296 }
297
298 VM_ASSERT(recorded_lock_rec == rb_ec_vm_lock_rec(ec));
299}
void rb_bug(const char *fmt,...)
Interpreter panic switch.
Definition: error.c:802
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_native_cond_signal(rb_nativethread_cond_t *cond)
Signals a condition variable.
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
Waits for the passed condition variable to be signalled.
void rb_native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
Identical to rb_native_cond_wait(), except it additionally takes timeout in msec resolution.