source: libcfa/src/concurrency/locks.hfa@ 70d8e2f2

1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks used with the runtime thread system.
9//
10// Author : Colby Alexander Parsons
11// Created On : Thu Jan 21 19:46:50 2021
12// Last Modified By :
13// Last Modified On :
14// Update Count :
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "containers/lockfree.hfa"
24#include "containers/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include "select.hfa"
33
34// futex headers
35#include <linux/futex.h> /* Definition of FUTEX_* constants */
36#include <sys/syscall.h> /* Definition of SYS_* constants */
37#include <unistd.h> /* Definition of syscall routine */
38
39typedef void (*__cfa_pre_park)( void * );
40
41static inline void pre_park_noop( void * ) {}
42
43//-----------------------------------------------------------------------------
44// is_blocking_lock
45forall( L & | sized(L) )
46trait is_blocking_lock {
47 // For synchronization locks to use when acquiring
48 void on_notify( L &, struct thread$ * );
49
50 // For synchronization locks to use when releasing
51 size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
52
53 // For synchronization locks to set the recursion count after being signalled
54 void on_wakeup( L &, size_t recursion );
55};
56
57static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
58 pp_fn( pp_datum );
59 park();
60}
61
62// macros for default routine impls for is_blocking_lock trait that do not wait-morph
63
64#define DEFAULT_ON_NOTIFY( lock_type ) \
65 static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }
66
67#define DEFAULT_ON_WAIT( lock_type ) \
68 static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
69 unlock( this ); \
70 pre_park_then_park( pp_fn, pp_datum ); \
71 return 0; \
72 }
73
74// on_wakeup impl if lock should be reacquired after waking up
75#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
76 static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }
77
78// on_wakeup impl if lock will not be reacquired after waking up
79#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
80 static inline void on_wakeup( lock_type & this, size_t recursion ) {}
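
// Illustrative sketch (hypothetical type, not defined in this file): a lock
// type "my_lock" that already provides lock()/unlock() can satisfy the
// is_blocking_lock trait without wait-morphing by expanding the defaults:
//   DEFAULT_ON_NOTIFY( my_lock )       // on_notify: simply unpark the signalled thread
//   DEFAULT_ON_WAIT( my_lock )         // on_wait: unlock, run the pre-park hook, then park
//   DEFAULT_ON_WAKEUP_REACQ( my_lock ) // on_wakeup: reacquire the lock after waking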
81
82
83
84//-----------------------------------------------------------------------------
85// Semaphore
86struct semaphore {
87 __spinlock_t lock;
88 int count;
89 __queue_t(thread$) waiting;
90};
91
92void ?{}(semaphore & this, int count = 1);
93void ^?{}(semaphore & this);
94bool P (semaphore & this);
95bool V (semaphore & this);
96bool V (semaphore & this, unsigned count);
97thread$ * V (semaphore & this, bool );
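
// Usage sketch (illustrative): a binary semaphore (default count of 1) used
// for mutual exclusion; P() may block the calling thread, V() releases a
// permit and may unpark a waiter.
//   semaphore sem;            // count defaults to 1
//   P( sem );
//   /* critical section */
//   V( sem );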
98
99//----------
100struct single_acquisition_lock {
101 inline blocking_lock;
102};
103
104static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
105static inline void ^?{}( single_acquisition_lock & this ) {}
106static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
107static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
108static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
109static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
110static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
111static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
112static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
113static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
114static inline void on_selected( single_acquisition_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); }
115
116//----------
117struct owner_lock {
118 inline blocking_lock;
119};
120
121static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
122static inline void ^?{}( owner_lock & this ) {}
123static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
124static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
125static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
126static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
127static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
128static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
129static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
130static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
131static inline void on_selected( owner_lock & this, select_node & node ) { on_selected( (blocking_lock &)this, node ); }
132
133//-----------------------------------------------------------------------------
134// MCS Lock
135struct mcs_node {
136 mcs_node * volatile next;
137 single_sem sem;
138};
139
140static inline void ?{}(mcs_node & this) { this.next = 0p; }
141
142static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
143 return node->next;
144}
145
146struct mcs_lock {
147 mcs_queue(mcs_node) queue;
148};
149
150static inline void lock(mcs_lock & l, mcs_node & n) {
151 if(push(l.queue, &n))
152 wait(n.sem);
153}
154
155static inline void unlock(mcs_lock & l, mcs_node & n) {
156 mcs_node * next = advance(l.queue, &n);
157 if(next) post(next->sem);
158}
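
// Usage sketch (illustrative): each acquisition supplies its own queue node,
// typically on the caller's stack, and the same node must be passed to unlock.
//   mcs_lock l;
//   mcs_node n;               // must stay live for the whole critical section
//   lock( l, n );
//   /* critical section */
//   unlock( l, n );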
159
160//-----------------------------------------------------------------------------
161// MCS Spin Lock
162// - No recursive acquisition
163// - Needs to be released by owner
164
165struct mcs_spin_node {
166 mcs_spin_node * volatile next;
167 volatile bool locked;
168};
169
170struct mcs_spin_queue {
171 mcs_spin_node * volatile tail;
172};
173
174static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
175
176struct mcs_spin_lock {
177 mcs_spin_queue queue;
178};
179
180static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
181 n.locked = true;
182 mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
183 if( prev == 0p ) return;
184 prev->next = &n;
185 while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
186}
187
188static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
189 mcs_spin_node * n_ptr = &n;
190 if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
191 while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
192 n.next->locked = false;
193}
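
// Handoff sketch (illustrative): lock() enqueues by atomically swapping the
// tail and spins on its own "locked" flag unless the queue was empty;
// unlock() tries to CAS the tail back to 0p (no successor), otherwise it
// waits for the successor link to be published and clears that node's flag.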
194
195//-----------------------------------------------------------------------------
196// futex_mutex
197
198// - Kernel thd blocking alternative to the spinlock
199// - No ownership (will deadlock on reacq)
200// - no reacq on wakeup
201struct futex_mutex {
202 // lock state: any state other than UNLOCKED is locked
203 // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
204
205 // stores a lock state
206 int val;
207};
208
209// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
210static inline int futex(int *uaddr, int futex_op, int val) {
211 return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
212}
213
214static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
215
216static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
217 return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
218}
219
220static inline int internal_exchange( futex_mutex & this ) with(this) {
221 return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
222}
223
224// if this is called recursively IT WILL DEADLOCK!!!!!
225static inline void lock( futex_mutex & this ) with(this) {
226 int state;
227
228 for( int spin = 4; spin < 1024; spin += spin) {
229 state = 0;
230 // if unlocked, lock and return
231 if (internal_try_lock(this, state)) return;
232 if (2 == state) break;
233 for (int i = 0; i < spin; i++) Pause();
234 }
235
236 // if not in contended state, set to be in contended state
237 if (state != 2) state = internal_exchange(this);
238
239 // block and spin until we win the lock
240 while (state != 0) {
241 futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
242 state = internal_exchange(this);
243 }
244}
245
246static inline void unlock(futex_mutex & this) with(this) {
247 // if uncontended do atomic unlock and then return
248 if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
249
250 // otherwise threads are blocked so we must wake one
251 futex((int *)&val, FUTEX_WAKE, 1);
252}
253
254DEFAULT_ON_NOTIFY( futex_mutex )
255DEFAULT_ON_WAIT( futex_mutex )
256DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
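
// State sketch (illustrative): 0 = unlocked, 1 = locked/uncontended,
// 2 = locked/contended. lock() first spins on a CAS from 0 to 1; under
// contention it swaps in 2 and sleeps in FUTEX_WAIT until an exchange
// observes 0. unlock() swaps in 0 and issues FUTEX_WAKE only when the old
// value was 2, so an uncontended lock/unlock pair makes no system call.
//   futex_mutex m;
//   lock( m );                // never acquire recursively: it will deadlock
//   /* critical section */
//   unlock( m );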
257
258//-----------------------------------------------------------------------------
259// go_mutex
260
261// - Kernel thd blocking alternative to the spinlock
262// - No ownership (will deadlock on reacq)
263// - Golang's flavour of mutex
264// - Impl taken from Golang: src/runtime/lock_futex.go
265struct go_mutex {
266 // lock state: any state other than UNLOCKED is locked
267 // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
268
269 // stores a lock state
270 int val;
271};
272static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
273// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
274// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
275
276static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
277 return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
278}
279
280static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
281 return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
282}
283
284// if this is called recursively IT WILL DEADLOCK!!!!!
285static inline void lock( go_mutex & this ) with( this ) {
286 int state, init_state;
287
288 // speculative grab
289 state = internal_exchange(this, 1);
290 if ( !state ) return; // state == 0
291 init_state = state;
292 for (;;) {
293 for( int i = 0; i < 4; i++ ) {
294 while( !val ) { // lock unlocked
295 state = 0;
296 if ( internal_try_lock( this, state, init_state ) ) return;
297 }
298 for (int i = 0; i < 30; i++) Pause();
299 }
300
301 while( !val ) { // lock unlocked
302 state = 0;
303 if ( internal_try_lock( this, state, init_state ) ) return;
304 }
305 sched_yield();
306
307 // if not in contended state, set to be in contended state
308 state = internal_exchange( this, 2 );
309 if ( !state ) return; // state == 0
310 init_state = 2;
311 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
312 }
313}
314
315static inline void unlock( go_mutex & this ) with(this) {
316 // if uncontended do atomic unlock and then return
317 if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
318
319 // otherwise threads are blocked so we must wake one
320 futex( (int *)&val, FUTEX_WAKE, 1 );
321}
322
323DEFAULT_ON_NOTIFY( go_mutex )
324DEFAULT_ON_WAIT( go_mutex )
325DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
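
// Acquisition sketch (illustrative), mirroring Go's lock_futex.go: spin with
// Pause() while the lock looks free, then sched_yield() and retry, and
// finally publish the SLEEPING state (2) and block in FUTEX_WAIT; unlock()
// matches futex_mutex and wakes one sleeper only when contention was recorded.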
326
327//-----------------------------------------------------------------------------
328// Exponential backoff then block lock
329struct exp_backoff_then_block_lock {
330 // Spin lock used for mutual exclusion
331 __spinlock_t spinlock;
332
333 // List of blocked threads
334 dlist( thread$ ) blocked_threads;
335
336 // Used for comparing and exchanging
337 volatile size_t lock_value;
338};
339
340static inline void ?{}( exp_backoff_then_block_lock & this ) {
341 this.spinlock{};
342 this.blocked_threads{};
343 this.lock_value = 0;
344}
345static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
346static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
347
348static inline void ^?{}( exp_backoff_then_block_lock & this ){}
349
350static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
351 return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
352}
353
354static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
355
356static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
357 return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
358}
359
360static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
361 lock( spinlock __cfaabi_dbg_ctx2 );
362 if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
363 unlock( spinlock );
364 return true;
365 }
366 insert_last( blocked_threads, *active_thread() );
367 unlock( spinlock );
368 park( );
369 return true;
370}
371
372static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
373 size_t compare_val = 0;
374 int spin = 4;
375
376 // exponential backoff: spin count doubles each iteration
377 for( ;; ) {
378 compare_val = 0;
379 if (internal_try_lock(this, compare_val)) return;
380 if (2 == compare_val) break;
381 for (int i = 0; i < spin; i++) Pause();
382 if (spin >= 1024) break;
383 spin += spin;
384 }
385
386 if(2 != compare_val && try_lock_contention(this)) return;
387 // block until signalled
388 while (block(this)) if(try_lock_contention(this)) return;
389}
390
391static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
392 if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
393 lock( spinlock __cfaabi_dbg_ctx2 );
394 thread$ * t = &try_pop_front( blocked_threads );
395 unlock( spinlock );
396 unpark( t );
397}
398
399DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
400DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
401DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
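
// Behaviour sketch (illustrative): lock() spins with an exponentially growing
// pause (4, 8, ..., 1024 iterations), then marks the lock contended and blocks
// on the internal list; unlock() unparks the first blocked thread, which
// re-races for the lock rather than inheriting ownership directly.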
402
403//-----------------------------------------------------------------------------
404// Fast Block Lock
405
406// minimal blocking lock
407// - No reacquire for cond var
408// - No recursive acquisition
409// - No ownership
410struct fast_block_lock {
411 // List of blocked threads
412 dlist( thread$ ) blocked_threads;
413
414 // Spin lock used for mutual exclusion
415 __spinlock_t lock;
416
417 // flag showing if lock is held
418 bool held:1;
419};
420
421static inline void ?{}( fast_block_lock & this ) with(this) {
422 lock{};
423 blocked_threads{};
424 held = false;
425}
426static inline void ^?{}( fast_block_lock & this ) {}
427static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
428static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
429
430// if this is called recursively IT WILL DEADLOCK!!!!!
431static inline void lock( fast_block_lock & this ) with(this) {
432 lock( lock __cfaabi_dbg_ctx2 );
433 if ( held ) {
434 insert_last( blocked_threads, *active_thread() );
435 unlock( lock );
436 park( );
437 return;
438 }
439 held = true;
440 unlock( lock );
441}
442
443static inline void unlock( fast_block_lock & this ) with(this) {
444 lock( lock __cfaabi_dbg_ctx2 );
445 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
446 thread$ * t = &try_pop_front( blocked_threads );
447 held = ( t ? true : false );
448 unpark( t );
449 unlock( lock );
450}
451
452static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
453 lock( lock __cfaabi_dbg_ctx2 );
454 insert_last( blocked_threads, *t );
455 unlock( lock );
456}
457DEFAULT_ON_WAIT( fast_block_lock )
458DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
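
// Wait-morphing note (illustrative): on_notify() appends the signalled thread
// to this lock's own blocked list instead of unparking it, so unlock() hands
// ownership directly to the next waiter and on_wakeup() needs no reacquire.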
459
460//-----------------------------------------------------------------------------
461// simple_owner_lock
462
463// pthread owner lock
464// - reacquire for cond var
465// - recursive acquisition
466// - ownership
467struct simple_owner_lock {
468 // List of blocked threads
469 dlist( select_node ) blocked_threads;
470
471 // Spin lock used for mutual exclusion
472 __spinlock_t lock;
473
474 // owning thread if the lock is held, 0p otherwise
475 struct thread$ * owner;
476
477 size_t recursion_count;
478};
479
480static inline void ?{}( simple_owner_lock & this ) with(this) {
481 lock{};
482 blocked_threads{};
483 owner = 0p;
484 recursion_count = 0;
485}
486static inline void ^?{}( simple_owner_lock & this ) {}
487static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
488static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
489
490static inline void lock( simple_owner_lock & this ) with(this) {
491 if ( owner == active_thread() ) {
492 recursion_count++;
493 return;
494 }
495 lock( lock __cfaabi_dbg_ctx2 );
496
497 if ( owner != 0p ) {
498 select_node node;
499 insert_last( blocked_threads, node );
500 unlock( lock );
501 park( );
502 return;
503 }
504 owner = active_thread();
505 recursion_count = 1;
506 unlock( lock );
507}
508
509static inline void pop_node( simple_owner_lock & this ) with(this) {
510 __handle_waituntil_OR( blocked_threads );
511 select_node * node = &try_pop_front( blocked_threads );
512 if ( node ) {
513 owner = node->blocked_thread;
514 recursion_count = 1;
515 // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
516 wake_one( blocked_threads, *node );
517 } else {
518 owner = 0p;
519 recursion_count = 0;
520 }
521}
522
523static inline void unlock( simple_owner_lock & this ) with(this) {
524 lock( lock __cfaabi_dbg_ctx2 );
525 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
526 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
527 // decrement recursion count; when it reaches zero, release the lock and set a new owner if one is waiting
528 recursion_count--;
529 if ( recursion_count == 0 ) {
530 pop_node( this );
531 }
532 unlock( lock );
533}
534
535static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
536 lock( lock __cfaabi_dbg_ctx2 );
537 // lock held
538 if ( owner != 0p ) {
539 insert_last( blocked_threads, *(select_node *)t->link_node );
540 }
541 // lock not held
542 else {
543 owner = t;
544 recursion_count = 1;
545 unpark( t );
546 }
547 unlock( lock );
548}
549
550static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
551 lock( lock __cfaabi_dbg_ctx2 );
552 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
553 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
554
555 size_t ret = recursion_count;
556
557 pop_node( this );
558
559 select_node node;
560 active_thread()->link_node = (void *)&node;
561 unlock( lock );
562
563 pre_park_then_park( pp_fn, pp_datum );
564
565 return ret;
566}
567
568static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
569
570// waituntil() support
571static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
572 lock( lock __cfaabi_dbg_ctx2 );
573
574 // check if we can complete operation. If so race to establish winner in special OR case
575 if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
576 if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
577 unlock( lock );
578 return false;
579 }
580 }
581
582 if ( owner == active_thread() ) {
583 recursion_count++;
584 if ( node.park_counter ) __make_select_node_available( node );
585 unlock( lock );
586 return true;
587 }
588
589 if ( owner != 0p ) {
590 insert_last( blocked_threads, node );
591 unlock( lock );
592 return false;
593 }
594
595 owner = active_thread();
596 recursion_count = 1;
597
598 if ( node.park_counter ) __make_select_node_available( node );
599 unlock( lock );
600 return true;
601}
602
603static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
604 lock( lock __cfaabi_dbg_ctx2 );
605 if ( node`isListed ) {
606 remove( node );
607 unlock( lock );
608 return false;
609 }
610
611 if ( owner == active_thread() ) {
612 recursion_count--;
613 if ( recursion_count == 0 ) {
614 pop_node( this );
615 }
616 }
617 unlock( lock );
618 return false;
619}
620
621static inline void on_selected( simple_owner_lock & this, select_node & node ) {}
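
// Usage sketch (illustrative): ownership permits nested acquisition by the
// same thread; on_wait() saves the recursion count and on_wakeup() restores
// it, so a condition-variable wait releases and later reinstates all levels.
//   simple_owner_lock l;
//   lock( l );
//   lock( l );                // same owner: recursion_count becomes 2
//   unlock( l );
//   unlock( l );              // count reaches 0: lock released, next waiter handed the lock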
622
623
624//-----------------------------------------------------------------------------
625// Spin Queue Lock
626
627// - No reacquire for cond var
628// - No recursive acquisition
629// - No ownership
630// - spin lock with no locking/atomics in unlock
631struct spin_queue_lock {
632 // Spin lock used for mutual exclusion
633 mcs_spin_lock lock;
634
635 // flag showing if lock is held
636 volatile bool held;
637};
638
639static inline void ?{}( spin_queue_lock & this ) with(this) {
640 lock{};
641 held = false;
642}
643static inline void ^?{}( spin_queue_lock & this ) {}
644static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
645static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
646
647// if this is called recursively IT WILL DEADLOCK!
648static inline void lock( spin_queue_lock & this ) with(this) {
649 mcs_spin_node node;
650 lock( lock, node );
651 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
652 __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
653 unlock( lock, node );
654}
655
656static inline void unlock( spin_queue_lock & this ) with(this) {
657 __atomic_store_n(&held, false, __ATOMIC_RELEASE);
658}
659
660DEFAULT_ON_NOTIFY( spin_queue_lock )
661DEFAULT_ON_WAIT( spin_queue_lock )
662DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
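
// Design note (illustrative): the internal mcs_spin_lock only orders waiters;
// the critical section itself is guarded by "held", so unlock() is a single
// release store with no atomic read-modify-write and no queue manipulation.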
663
664//-----------------------------------------------------------------------------
665// MCS Block Spin Lock
666
667// - No reacquire for cond var
668// - No recursive acquisition
669// - No ownership
670// - Blocks, but the head node spins (like spin_queue_lock, except non-head threads block instead of spinning)
671struct mcs_block_spin_lock {
672 // Spin lock used for mutual exclusion
673 mcs_lock lock;
674
675 // flag showing if lock is held
676 volatile bool held;
677};
678
679static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
680 lock{};
681 held = false;
682}
683static inline void ^?{}( mcs_block_spin_lock & this ) {}
684static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
685static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
686
687// if this is called recursively IT WILL DEADLOCK!!!!!
688static inline void lock( mcs_block_spin_lock & this ) with(this) {
689 mcs_node node;
690 lock( lock, node );
691 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
692 __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
693 unlock( lock, node );
694}
695
696static inline void unlock(mcs_block_spin_lock & this) with(this) {
697 __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
698}
699
700DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
701DEFAULT_ON_WAIT( mcs_block_spin_lock )
702DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
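
// Design note (illustrative): same structure as spin_queue_lock, except the
// internal mcs_lock parks non-head waiters on a semaphore instead of spinning,
// so only the thread at the head of the queue busy-waits on "held".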
703
704//-----------------------------------------------------------------------------
705// Block Spin Lock
706
707// - No reacquire for cond var
708// - No recursive acquisition
709// - No ownership
710// - Blocks, but the head node spins (like spin_queue_lock, except non-head threads block instead of spinning)
711struct block_spin_lock {
712 // Spin lock used for mutual exclusion
713 fast_block_lock lock;
714
715 // flag showing if lock is held
716 volatile bool held;
717};
718
719static inline void ?{}( block_spin_lock & this ) with(this) {
720 lock{};
721 held = false;
722}
723static inline void ^?{}( block_spin_lock & this ) {}
724static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
725static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
726
727// if this is called recursively IT WILL DEADLOCK!!!!!
728static inline void lock( block_spin_lock & this ) with(this) {
729 lock( lock );
730 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
731 __atomic_store_n(&held, true, __ATOMIC_RELEASE);
732 unlock( lock );
733}
734
735static inline void unlock( block_spin_lock & this ) with(this) {
736 __atomic_store_n(&held, false, __ATOMIC_RELEASE);
737}
738
739static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
740 // first we acquire internal fast_block_lock
741 lock( lock __cfaabi_dbg_ctx2 );
742 if ( held ) { // if internal fast_block_lock is held
743 insert_last( blocked_threads, *t );
744 unlock( lock );
745 return;
746 }
747 // if internal fast_block_lock is not held
748 held = true;
749 unlock( lock );
750
751 unpark(t);
752}
753DEFAULT_ON_WAIT( block_spin_lock )
754static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
755 // now we acquire the entire block_spin_lock upon waking up
756 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
757 __atomic_store_n(&held, true, __ATOMIC_RELEASE);
758 unlock( lock ); // Now we release the internal fast_block_lock
759}
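
// Handoff note (illustrative): on_notify() acquires the internal
// fast_block_lock on the signaller's side (or enqueues the thread if it is
// busy), and the woken thread completes the acquisition in on_wakeup() by
// spinning on "held" before releasing that internal lock.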
760
761//-----------------------------------------------------------------------------
762// info_thread
763// the info thread is a wrapper around a thread used
764// to store extra data for use in the condition variable
765forall(L & | is_blocking_lock(L)) {
766 struct info_thread;
767}
768
769//-----------------------------------------------------------------------------
770// Synchronization Locks
771forall(L & | is_blocking_lock(L)) {
772
773 //-----------------------------------------------------------------------------
774 // condition_variable
775
776 // The multi-tool condition variable
777 // - can wait with a timeout, waking on either a signal or the timeout
778 // - can wait without passing a lock
779 // - can have waiters reacquire different locks while waiting on the same cond var
780 // - has shadow queue
781 // - can be signalled outside of critical sections with no locks held
782 struct condition_variable {
783 // Spin lock used for mutual exclusion
784 __spinlock_t lock;
785
786 // List of blocked threads
787 dlist( info_thread(L) ) blocked_threads;
788
789 // Count of current blocked threads
790 int count;
791 };
792
793
794 void ?{}( condition_variable(L) & this );
795 void ^?{}( condition_variable(L) & this );
796
797 bool notify_one( condition_variable(L) & this );
798 bool notify_all( condition_variable(L) & this );
799
800 uintptr_t front( condition_variable(L) & this );
801
802 bool empty ( condition_variable(L) & this );
803 int counter( condition_variable(L) & this );
804
805 void wait( condition_variable(L) & this );
806 void wait( condition_variable(L) & this, uintptr_t info );
807 bool wait( condition_variable(L) & this, Duration duration );
808 bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
809
810 void wait( condition_variable(L) & this, L & l );
811 void wait( condition_variable(L) & this, L & l, uintptr_t info );
812 bool wait( condition_variable(L) & this, L & l, Duration duration );
813 bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
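
	// Usage sketch (illustrative), monitor-style with an owner_lock and a
	// caller-provided flag "ready"; wait() releases the lock, and the lock is
	// held again (recursion restored) when wait() returns; the Duration
	// overloads return a bool distinguishing signal from timeout.
	//   owner_lock m;
	//   condition_variable( owner_lock ) cv;
	//   bool ready = false;
	//   // waiter:    lock( m ); while ( !ready ) wait( cv, m ); /* use state */ unlock( m );
	//   // signaller: lock( m ); ready = true; notify_one( cv ); unlock( m );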
814
815 //-----------------------------------------------------------------------------
816 // fast_cond_var
817
818 // The trimmed and slim condition variable
819 // - no internal lock so you must hold a lock while using this cond var
820 // - signalling without holding branded lock is UNSAFE!
821 // - only allows usage with one lock; the cond var is branded by the first lock used with it
822
823 struct fast_cond_var {
824 // List of blocked threads
825 dlist( info_thread(L) ) blocked_threads;
826 #ifdef __CFA_DEBUG__
827 L * lock_used;
828 #endif
829 };
830
831 void ?{}( fast_cond_var(L) & this );
832 void ^?{}( fast_cond_var(L) & this );
833
834 bool notify_one( fast_cond_var(L) & this );
835 bool notify_all( fast_cond_var(L) & this );
836
837 uintptr_t front( fast_cond_var(L) & this );
838 bool empty ( fast_cond_var(L) & this );
839
840 void wait( fast_cond_var(L) & this, L & l );
841 void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
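
	// Usage note (illustrative): unlike condition_variable there is no internal
	// spin lock, so every operation, including notify_one/notify_all, must be
	// performed while holding the single lock the cond var is branded with.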
842
843
844 //-----------------------------------------------------------------------------
845 // pthread_cond_var
846 //
847 // - cond var with minimal footprint
848 // - supports the operations needed for pthread cond
849
850 struct pthread_cond_var {
851 dlist( info_thread(L) ) blocked_threads;
852 __spinlock_t lock;
853 };
854
855 void ?{}( pthread_cond_var(L) & this );
856 void ^?{}( pthread_cond_var(L) & this );
857
858 bool notify_one( pthread_cond_var(L) & this );
859 bool notify_all( pthread_cond_var(L) & this );
860
861 uintptr_t front( pthread_cond_var(L) & this );
862 bool empty ( pthread_cond_var(L) & this );
863
864 void wait( pthread_cond_var(L) & this, L & l );
865 void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
866 bool wait( pthread_cond_var(L) & this, L & l, timespec t );
867 bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
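
	// Usage note (illustrative): the timespec overloads supply the timed wait
	// needed to layer pthread_cond_timedwait on top of this condition variable.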
868}