source: libcfa/src/concurrency/locks.hfa@ 5908fb4

Last change on this file since 5908fb4 was 5a05946, checked in by caparsons <caparson@…> 2 years ago: "cleanup up locks files and fixed a minor whitespace issue in preemption.cfa"

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

#include <fstream.hfa>

// futex headers
#include <linux/futex.h>	/* Definition of FUTEX_* constants */
#include <sys/syscall.h>	/* Definition of SYS_* constants */
#include <unistd.h>

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

	// To set recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
	pp_fn( pp_datum );
	park();
}

// macros for default routine impls for the is_blocking_lock trait that do not wait-morph

#define DEFAULT_ON_NOTIFY( lock_type ) \
	static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }

#define DEFAULT_ON_WAIT( lock_type ) \
	static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
		unlock( this ); \
		pre_park_then_park( pp_fn, pp_datum ); \
		return 0; \
	}

// on_wakeup impl if lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
	static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }

// on_wakeup impl if lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
	static inline void on_wakeup( lock_type & this, size_t recursion ) {}

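// Example (sketch, not part of this header): how a new lock type typically satisfies the
// is_blocking_lock trait by combining its own lock()/unlock() with the DEFAULT_* macros above.
// The type my_lock and its routines are hypothetical and shown only for illustration.
//
//   struct my_lock { volatile bool held; };
//   static inline void ?{}( my_lock & this ) { this.held = false; }
//   static inline void lock( my_lock & this ) {
//       while ( __atomic_exchange_n( &this.held, true, __ATOMIC_ACQUIRE ) ) Pause();
//   }
//   static inline void unlock( my_lock & this ) {
//       __atomic_store_n( &this.held, false, __ATOMIC_RELEASE );
//   }
//   DEFAULT_ON_NOTIFY( my_lock )          // on_notify: just unpark the signalled thread
//   DEFAULT_ON_WAIT( my_lock )            // on_wait: unlock, run pre-park hook, park
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )    // on_wakeup: reacquire the lock after waking
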
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

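// Example (sketch, not part of this header): counting-semaphore usage with P/V.
// The worker routine below is illustrative; only P and V come from this header.
//
//   semaphore slots{ 4 };           // allow at most 4 threads in the region
//   void worker() {
//       P( slots );                 // acquire a slot, blocking if none are free
//       // ... bounded region ...
//       V( slots );                 // release the slot, waking a blocked thread if any
//   }
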
//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

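// Example (sketch, not part of this header): both wrappers delegate to blocking_lock, but they
// are constructed with different flags; owner_lock tracks an owner and tolerates repeated
// acquisition by that owner, while single_acquisition_lock must not be re-locked by its holder.
// The deposit routine is illustrative only.
//
//   owner_lock m;
//   void deposit() {
//       lock( m );
//       lock( m );       // acquired again by the same (owning) thread
//       // ... critical section ...
//       unlock( m );
//       unlock( m );     // released once the matching unlocks balance the locks
//   }
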
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}

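// Example (sketch, not part of this header): MCS locking threads a caller-provided queue node
// through lock() and unlock(); the node must stay alive for the whole critical section, so a
// stack-allocated node per acquisition is the usual pattern.
//
//   mcs_lock l;
//   void critical() {
//       mcs_node n;          // this thread's position in the queue
//       lock( l, n );        // enqueue; block on n.sem if a predecessor exists
//       // ... critical section ...
//       unlock( l, n );      // dequeue; post the successor's semaphore if one is waiting
//   }
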
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}

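// Example (sketch, not part of this header): same node-passing protocol as mcs_lock, but waiters
// spin on their own node's locked flag instead of blocking on a semaphore, so it is best suited
// to short critical sections.
//
//   mcs_spin_lock l;
//   void critical() {
//       mcs_spin_node n;
//       lock( l, n );        // spin on n.locked if a predecessor exists
//       // ... short critical section ...
//       unlock( l, n );      // clear the successor's locked flag, if any
//   }
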
//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - No reacquire on wakeup
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
	return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange( futex_mutex & this ) with(this) {
	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with(this) {
	int state;

	for( int spin = 4; spin < 1024; spin += spin) {
		state = 0;
		// if unlocked, lock and return
		if (internal_try_lock(this, state)) return;
		if (2 == state) break;
		for (int i = 0; i < spin; i++) Pause();
	}

	// if not in contended state, set to be in contended state
	if (state != 2) state = internal_exchange(this);

	// block and spin until we win the lock
	while (state != 0) {
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange(this);
	}
}

static inline void unlock(futex_mutex & this) with(this) {
	// if uncontended do atomic unlock and then return
	if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

	// otherwise threads are blocked so we must wake one
	futex((int *)&val, FUTEX_WAKE, 1);
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

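// Example (sketch, not part of this header): futex_mutex behaves like a plain non-recursive
// mutex; lock() spins briefly, then parks the kernel thread in the futex until unlock() wakes
// it. Never acquire it twice from the same thread. The update routine is illustrative only.
//
//   futex_mutex m;
//   void update( int & shared ) {
//       lock( m );           // spin a little, then FUTEX_WAIT if contended
//       shared += 1;
//       unlock( m );         // FUTEX_WAKE one waiter if the lock was contended
//   }
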
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

	// stores a lock state
	int val;
};
static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
	return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
	int state, init_state;

	// speculative grab
	state = internal_exchange(this, 1);
	if ( !state ) return; // state == 0
	init_state = state;
	for (;;) {
		for( int i = 0; i < 4; i++ ) {
			while( !val ) { // lock unlocked
				state = 0;
				if ( internal_try_lock( this, state, init_state ) ) return;
			}
			for (int i = 0; i < 30; i++) Pause();
		}

		while( !val ) { // lock unlocked
			state = 0;
			if ( internal_try_lock( this, state, init_state ) ) return;
		}
		sched_yield();

		// if not in contended state, set to be in contended state
		state = internal_exchange( this, 2 );
		if ( !state ) return; // state == 0
		init_state = 2;
		futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
	}
}

static inline void unlock( go_mutex & this ) with(this) {
	// if uncontended do atomic unlock and then return
	if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

	// otherwise threads are blocked so we must wake one
	futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void ^?{}( exp_backoff_then_block_lock & this ){}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
	return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
	return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
	size_t compare_val = 0;
	int spin = 4;

	// exponential backoff (spin count doubles each round)
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= 1024) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( select_node ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with(this) {
	if ( owner == active_thread() ) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if ( owner != 0p ) {
		select_node node;
		insert_last( blocked_threads, node );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with(this) {
	__handle_waituntil_OR( blocked_threads );
	select_node * node = &try_pop_front( blocked_threads );
	if ( node ) {
		owner = node->blocked_thread;
		recursion_count = 1;
		// if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
		wake_one( blocked_threads, *node );
	} else {
		owner = 0p;
		recursion_count = 0;
	}
}

static inline void unlock( simple_owner_lock & this ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if recursion count is zero release lock and set new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		pop_node( this );
	}
	unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *(select_node *)t->link_node );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	pop_node( this );

	select_node node;
	active_thread()->link_node = (void *)&node;
	unlock( lock );

	pre_park_then_park( pp_fn, pp_datum );

	return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );

	// check if we can complete operation. If so race to establish winner in special OR case
	if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
		if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
			unlock( lock );
			return false;
		}
	}

	if ( owner == active_thread() ) {
		recursion_count++;
		if ( node.park_counter ) __make_select_node_available( node );
		unlock( lock );
		return true;
	}

	if ( owner != 0p ) {
		insert_last( blocked_threads, node );
		unlock( lock );
		return false;
	}

	owner = active_thread();
	recursion_count = 1;

	if ( node.park_counter ) __make_select_node_available( node );
	unlock( lock );
	return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( node`isListed ) {
		remove( node );
		unlock( lock );
		return false;
	}

	if ( owner == active_thread() ) {
		recursion_count--;
		if ( recursion_count == 0 ) {
			pop_node( this );
		}
	}
	unlock( lock );
	return false;
}

static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }


//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with(this) {
	mcs_spin_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks but first node spins (like spin queue but blocking for not first thd)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with(this) {
	mcs_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks but first node spins (like spin queue but blocking for not first thd)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with(this) {
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
	// first we acquire internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// if internal fast_block_lock is not held
	held = true;
	unlock( lock );

	unpark(t);
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
	// now we acquire the entire block_spin_lock upon waking up
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock ); // now we release the internal fast_block_lock
}

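// Note (added commentary, not in the original header): on_notify() above acquires the internal
// fast_block_lock on behalf of the thread it wakes, either immediately or later when the current
// holder of the fast_block_lock unlocks and pops that thread from its queue. The woken thread
// then completes acquisition of the outer block_spin_lock in on_wakeup() by spinning on held,
// and finally releases the internal lock it was handed; that is why on_wakeup() ends with
// unlock( lock ) even though it never called lock() itself.
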
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty ( condition_variable(L) & this );
	int counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

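	// Example (sketch, not part of this header): monitor-style waiting with an owner_lock.
	// The predicate and helper routines below are illustrative only.
	//
	//   owner_lock m;
	//   condition_variable( owner_lock ) not_empty;
	//
	//   void take() {
	//       lock( m );
	//       while ( queue_is_empty() )       // hypothetical predicate
	//           wait( not_empty, m );        // releases m, blocks, reacquires m on wakeup
	//       // ... consume an item ...
	//       unlock( m );
	//   }
	//
	//   void put() {
	//       lock( m );
	//       // ... add an item ...
	//       notify_one( not_empty );         // may also be called with no locks held
	//       unlock( m );
	//   }
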
	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding branded lock is UNSAFE!
	// - only allows usage of one lock, cond var is branded after usage

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}