source: libcfa/src/concurrency/locks.hfa@1034059

1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks used with the runtime thread system.
9//
10// Author : Colby Alexander Parsons
11// Created On : Thu Jan 21 19:46:50 2021
12// Last Modified By : Peter A. Buhr
13// Last Modified On : Fri Oct 31 09:20:22 2025
14// Update Count : 59
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "collections/lockfree.hfa"
24#include "collections/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include "select.hfa"
33
34// futex headers
35#include <linux/futex.h> // Definition of FUTEX_* constants
36#include <sys/syscall.h> // Definition of SYS_* constants
37#include <unistd.h> // Definition of syscall routine
38
39typedef void (*__cfa_pre_park)( void * );
40
41static inline void pre_park_noop( void * ) {}
42
43//-----------------------------------------------------------------------------
44// is_blocking_lock
45forall( L & /*| sized( L )*/ )
46trait is_blocking_lock {
47 // For synchronization locks to use when acquiring
48 void on_notify( L &, struct thread$ * );
49
50 // For synchronization locks to use when releasing
51 size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
52
53 // to set the recursion count after being signalled
54 void on_wakeup( L &, size_t recursion );
55};
56
57static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
58 pp_fn( pp_datum );
59 park();
60}
61
62// macros for default routine impls for is_blocking_lock trait that do not wait-morph
63
64#define DEFAULT_ON_NOTIFY( lock_type ) \
65 static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
66
67#define DEFAULT_ON_WAIT( lock_type ) \
68 static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
69 unlock( this ); \
70 pre_park_then_park( pp_fn, pp_datum ); \
71 return 0; \
72 }
73
74// on_wakeup impl if lock should be reacquired after waking up
75#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
76 static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
77
78// on_wakeup impl if lock will not be reacquired after waking up
79#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
80 static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
81
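// As an illustration (hypothetical type, not part of this file), a lock type my_lock that
// already provides lock()/unlock() can opt into the is_blocking_lock interface with the
// defaults above:
//
//   DEFAULT_ON_NOTIFY( my_lock )         // on_notify: unpark the signalled thread
//   DEFAULT_ON_WAIT( my_lock )           // on_wait: unlock, run pre-park hook, park
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )   // on_wakeup: reacquire the lock on wakeup
//
// Locks later in this file (futex_mutex, exp_backoff_then_block_lock, spin_queue_lock, ...)
// are wired up with these macros, choosing the reacquire or no-reacquire wakeup as appropriate.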
82
83//-----------------------------------------------------------------------------
84// Semaphore, counting
85
86struct semaphore {
87 // PRIVATE
88 ssize_t count$; // - => # waiting threads, 0 => block, + => acquire
89 __spinlock_t lock$; // protect blocking-lock critical sections
90 __queue_t( thread$ ) waiting$; // waiting threads
91};
92
93// PRIVATE
94//thread$ * V( semaphore & sem, bool );
95
96// PUBLIC
97void ?{}( semaphore & sem, ssize_t count = 1 );
98// Return values are used for composition: the calling thread learns whether another thread was unblocked, which can
99// be useful for debugging, performance instrumentation, and other metadata-tracking purposes.
100bool P( semaphore & sem );
101static inline bool P( semaphore & sem, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem ); }
102bool P( semaphore & sem, semaphore & lock ); // atomic block and release
103bool try_P( semaphore & sem );
104static inline bool P( semaphore & sem, semaphore & lock, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem, lock ); }
105bool V( semaphore & sem );
106bool V( semaphore & sem, size_t count );
107static inline uintptr_t front( semaphore & sem ) with( sem ) { // return shadow information for first waiting thread
108 #ifdef __CFA_DEBUG__
109 if ( waiting$ ) { // condition queue must not be empty
110 abort( "Attempt to access user data on an empty semaphore lock.\n"
111 "Possible cause is not checking if the condition lock is empty before reading stored data." );
112 } // if
113 #endif // __CFA_DEBUG__
114 return waiting$.head->shadow$; // return condition information stored with blocked task
115}
116static inline ssize_t counter( semaphore & sem ) with( sem ) { return count$; } // - => # waiting threads, 0 => block, + => acquire
117//static inline int ?!=?( semaphore & sem, zero_t ) with( sem ) { return count$ != 0; } // empty waiting queue
118static inline bool empty( semaphore & sem ) with( sem ) { return count$ == 0; } // empty waiting queue
119
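// Usage sketch (assumes standard counting-semaphore semantics: P may block, V unblocks one
// waiter; the uintptr_t "shadow" variants stash per-waiter data retrievable with front()):
//
//   semaphore items = { 0 };            // start with nothing available
//
//   // producer:                        // consumer:
//   //   ... produce ...                //   P( items );      // block until an item exists
//   //   V( items );                    //   ... consume ...
//
// The bool returned by P/V reports whether another thread was unblocked, for composition and
// instrumentation as noted above.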
120
121//----------
122struct single_acquisition_lock {
123 inline blocking_lock;
124};
125
126static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
127static inline void ^?{}( single_acquisition_lock & this ) {}
128static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
129static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
130static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
131static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
132static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
133static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
134static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
135static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
136static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
137__CFA_SELECT_GET_TYPE( single_acquisition_lock );
138
139//----------
140struct owner_lock {
141 inline blocking_lock;
142};
143
144static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
145static inline void ^?{}( owner_lock & this ) {}
146static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
147static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
148static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
149static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
150static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
151static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
152static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
153static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
154static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
155__CFA_SELECT_GET_TYPE( owner_lock );
156
157//-----------------------------------------------------------------------------
158// MCS Lock
159struct mcs_node {
160 mcs_node * volatile next;
161 single_sem sem;
162};
163
164static inline void ?{}( mcs_node & this ) { this.next = 0p; }
165
166static inline mcs_node * volatile & next( mcs_node * node ) {
167 return node->next;
168}
169
170struct mcs_lock {
171 mcs_queue( mcs_node ) queue;
172};
173
174static inline void lock( mcs_lock & l, mcs_node & n ) {
175 if ( push( l.queue, &n ) )
176 wait( n.sem );
177}
178
179static inline void unlock( mcs_lock & l, mcs_node & n ) {
180 mcs_node * nxt = advance( l.queue, &n );
181 if ( nxt ) post( nxt->sem );
182}
183
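// Usage sketch: each acquiring thread supplies its own queue node, and the same node must be
// passed to the matching unlock (standard MCS usage; shown as an assumption about intended use):
//
//   mcs_lock l;
//   void critical_section() {
//       mcs_node n;                     // per-acquisition node, typically on the caller's stack
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );                 // must be the same node given to lock
//   }
//
// The mcs_spin_lock below follows the same node-passing pattern with mcs_spin_node.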
184//-----------------------------------------------------------------------------
185// MCS Spin Lock
186// - No recursive acquisition
187// - Needs to be released by owner
188
189struct mcs_spin_node {
190 mcs_spin_node * volatile next;
191 volatile bool locked;
192};
193
194struct mcs_spin_queue {
195 mcs_spin_node * volatile tail;
196};
197
198static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }
199
200struct mcs_spin_lock {
201 mcs_spin_queue queue;
202};
203
204static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
205 n.locked = true;
206
207 #if defined( __ARM_ARCH )
208 __asm__ __volatile__ ( "DMB ISH" ::: );
209 #endif
210
211 mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
212 if ( prev_val == 0p ) return;
213 prev_val->next = &n;
214
215 #if defined( __ARM_ARCH )
216 __asm__ __volatile__ ( "DMB ISH" ::: );
217 #endif
218
219 while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();
220
221 #if defined( __ARM_ARCH )
222 __asm__ __volatile__ ( "DMB ISH" ::: );
223 #endif
224}
225
226static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
227 #if defined( __ARM_ARCH )
228 __asm__ __volatile__ ( "DMB ISH" ::: );
229 #endif
230
231 mcs_spin_node * n_ptr = &n;
232 if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
233 while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();
234
235 #if defined( __ARM_ARCH )
236 __asm__ __volatile__ ( "DMB ISH" ::: );
237 #endif
238
239 n.next->locked = false;
240}
241
242//-----------------------------------------------------------------------------
243// futex_mutex
244
245// - Kernel-thread-blocking alternative to the spinlock
246// - No ownership (recursive acquisition will deadlock)
247// - No reacquire on wakeup
248struct futex_mutex {
249 // lock state: any state other than UNLOCKED is locked
250 // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
251
252 // stores a lock state
253 int val;
254};
255
256// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
257static inline int futex( int *uaddr, int futex_op, int val ) {
258 return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
259}
260
261static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }
262
263static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
264 return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
265}
266
267static inline int internal_exchange( futex_mutex & this ) with( this ) {
268 return __atomic_exchange_n( (int*)&val, 2, __ATOMIC_ACQUIRE );
269}
270
271// if this is called recursively IT WILL DEADLOCK!!!!!
272static inline void lock( futex_mutex & this ) with( this ) {
273 int state;
274
275 for ( spin; 4 ~ 1024 ~ spin ) {
276 state = 0;
277 // if unlocked, lock and return
278 if ( internal_try_lock( this, state ) ) return;
279 if ( state == 2 ) break;
280 for ( spin ) Pause();
281 }
282
283 // if not in contended state, set to be in contended state
284 if ( state != 2 ) state = internal_exchange( this );
285
286 // block and spin until we win the lock
287 while ( state != 0 ) {
288 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
289 state = internal_exchange( this );
290 }
291}
292
293static inline void unlock( futex_mutex & this ) with( this ) {
294 // if uncontended do atomic unlock and then return
295 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
296
297 // otherwise threads are blocked so we must wake one
298 futex( (int *)&val, FUTEX_WAKE, 1 );
299}
300
301DEFAULT_ON_NOTIFY( futex_mutex )
302DEFAULT_ON_WAIT( futex_mutex )
303DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
304
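// Usage sketch (no ownership, so never acquire recursively):
//
//   futex_mutex m;
//   lock( m );                          // fast path: CAS 0 -> 1; contended path: set 2 and futex-wait
//   // ... critical section ...
//   unlock( m );                        // exchange to 0; if the old value was not 1, futex-wake one waiter
//
// The 0/1/2 encoding is the classic futex-mutex protocol: 1 means locked with no known waiters
// (unlock skips the FUTEX_WAKE syscall), 2 means locked with possible waiters.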
305//-----------------------------------------------------------------------------
306// go_mutex
307
308// - Kernel-thread-blocking alternative to the spinlock
309// - No ownership (recursive acquisition will deadlock)
310// - Golang's flavour of mutex
311// - Impl taken from Golang: src/runtime/lock_futex.go
312struct go_mutex {
314 // lock state: any state other than UNLOCKED is locked
314 // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
315
316 // stores a lock state
317 int val;
318};
319static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
320static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
321static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
322
323static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
324 return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
325}
326
327static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
328 return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE );
329}
330
331// if this is called recursively IT WILL DEADLOCK!!!!!
332static inline void lock( go_mutex & this ) with( this ) {
333 int state, init_state;
334
335 // speculative grab
336 state = internal_exchange( this, 1 );
337 if ( ! state ) return; // state == 0
338 init_state = state;
339 for () {
340 for ( 4 ) {
341 while ( ! val ) { // lock unlocked
342 state = 0;
343 if ( internal_try_lock( this, state, init_state ) ) return;
344 }
345 for ( 30 ) Pause();
346 }
347
348 while ( ! val ) { // lock unlocked
349 state = 0;
350 if ( internal_try_lock( this, state, init_state ) ) return;
351 }
352 sched_yield();
353
354 // if not in contended state, set to be in contended state
355 state = internal_exchange( this, 2 );
356 if ( ! state ) return; // state == 0
357 init_state = 2;
358 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
359 }
360}
361
362static inline void unlock( go_mutex & this ) with( this ) {
363 // if uncontended do atomic unlock and then return
364 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
365
366 // otherwise threads are blocked so we must wake one
367 futex( (int *)&val, FUTEX_WAKE, 1 );
368}
369
370DEFAULT_ON_NOTIFY( go_mutex )
371DEFAULT_ON_WAIT( go_mutex )
372DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
373
374//-----------------------------------------------------------------------------
375// Exponential backoff then block lock
376struct exp_backoff_then_block_lock {
377 // Spin lock used for mutual exclusion
378 __spinlock_t spinlock;
379
380 // List of blocked threads
381 dlist( thread$ ) blocked_threads;
382
384 // Lock state used for compare-and-exchange operations
384 volatile size_t lock_value;
385};
386
387static inline void ?{}( exp_backoff_then_block_lock & this ) {
388 this.spinlock{};
389 this.blocked_threads{};
390 this.lock_value = 0;
391}
392static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
393static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
394
395static inline void ^?{}( exp_backoff_then_block_lock & this ){}
396
397static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
398 return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
399}
400
401static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
402
403static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
404 return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
405}
406
407static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
408 lock( spinlock __cfaabi_dbg_ctx2 );
409 if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
410 unlock( spinlock );
411 return true;
412 }
413 insert_last( blocked_threads, *active_thread() );
414 unlock( spinlock );
415 park( );
416 return true;
417}
418
419static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
420 size_t compare_val = 0;
421 int spin = 4;
422
423 // exponential backoff: spin count doubles up to 1024
424 for () {
425 compare_val = 0;
426 if ( internal_try_lock( this, compare_val ) ) return;
427 if ( compare_val == 2 ) break;
428 for ( spin ) Pause();
429 if ( spin >= 1024 ) break;
430 spin += spin;
431 }
432
433 if ( 2 != compare_val && try_lock_contention( this ) ) return;
434 // block until signalled
435 while ( block( this ) ) if ( try_lock_contention( this ) ) return;
436}
437
438static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
439 if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
440 lock( spinlock __cfaabi_dbg_ctx2 );
441 thread$ * t = &remove_first( blocked_threads );
442 unlock( spinlock );
443 unpark( t );
444}
445
446DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
447DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
448DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
449
450//-----------------------------------------------------------------------------
451// Fast Block Lock
452
453// minimal blocking lock
454// - No reacquire for cond var
455// - No recursive acquisition
456// - No ownership
457struct fast_block_lock {
458 // List of blocked threads
459 dlist( thread$ ) blocked_threads;
460
461 // Spin lock used for mutual exclusion
462 __spinlock_t lock;
463
464 // flag showing if lock is held
465 bool held:1;
466};
467
468static inline void ?{}( fast_block_lock & this ) with( this ) {
469 lock{};
470 blocked_threads{};
471 held = false;
472}
473static inline void ^?{}( fast_block_lock & this ) {}
474static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
475static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
476
477// if this is called recursively IT WILL DEADLOCK!!!!!
478static inline void lock( fast_block_lock & this ) with( this ) {
479 lock( lock __cfaabi_dbg_ctx2 );
480 if ( held ) {
481 insert_last( blocked_threads, *active_thread() );
482 unlock( lock );
483 park( );
484 return;
485 }
486 held = true;
487 unlock( lock );
488}
489
490static inline void unlock( fast_block_lock & this ) with( this ) {
491 lock( lock __cfaabi_dbg_ctx2 );
492 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
493 thread$ * t = &remove_first( blocked_threads );
494 held = ( t ? true : false );
495 unpark( t );
496 unlock( lock );
497}
498
499static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
500 lock( lock __cfaabi_dbg_ctx2 );
501 insert_last( blocked_threads, *t );
502 unlock( lock );
503}
504DEFAULT_ON_WAIT( fast_block_lock )
505DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
506
507//-----------------------------------------------------------------------------
508// simple_owner_lock
509
510// pthread owner lock
511// - reacquire for cond var
512// - recursive acquisition
513// - ownership
514struct simple_owner_lock {
515 // List of blocked threads
516 dlist( select_node ) blocked_threads;
517
518 // Spin lock used for mutual exclusion
519 __spinlock_t lock;
520
522 // owning thread; 0p when the lock is not held
522 struct thread$ * owner;
523
524 size_t recursion_count;
525};
526
527static inline void ?{}( simple_owner_lock & this ) with( this ) {
528 lock{};
529 blocked_threads{};
530 owner = 0p;
531 recursion_count = 0;
532}
533static inline void ^?{}( simple_owner_lock & this ) {}
534static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
535static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
536
537static inline void lock( simple_owner_lock & this ) with( this ) {
538 if ( owner == active_thread() ) {
539 recursion_count++;
540 return;
541 }
542 lock( lock __cfaabi_dbg_ctx2 );
543
544 if ( owner != 0p ) {
545 select_node node;
546 insert_last( blocked_threads, node );
547 unlock( lock );
548 park( );
549 return;
550 }
551 owner = active_thread();
552 recursion_count = 1;
553 unlock( lock );
554}
555
556static inline void pop_node( simple_owner_lock & this ) with( this ) {
557 __handle_waituntil_OR( blocked_threads );
558 select_node * node = &remove_first( blocked_threads );
559 if ( node ) {
560 owner = node->blocked_thread;
561 recursion_count = 1;
562 // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
563 wake_one( blocked_threads, *node );
564 } else {
565 owner = 0p;
566 recursion_count = 0;
567 }
568}
569
570static inline void unlock( simple_owner_lock & this ) with( this ) {
571 lock( lock __cfaabi_dbg_ctx2 );
572 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
573 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
574 // decrement the recursion count; if it reaches zero, release the lock and hand it to a waiting thread, if any
575 recursion_count--;
576 if ( recursion_count == 0 ) {
577 pop_node( this );
578 }
579 unlock( lock );
580}
581
582static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
583 lock( lock __cfaabi_dbg_ctx2 );
584 // lock held
585 if ( owner != 0p ) {
586 insert_last( blocked_threads, *(select_node *)t->link_node );
587 }
588 // lock not held
589 else {
590 owner = t;
591 recursion_count = 1;
592 unpark( t );
593 }
594 unlock( lock );
595}
596
597static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
598 lock( lock __cfaabi_dbg_ctx2 );
599 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
600 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
601
602 size_t ret = recursion_count;
603
604 pop_node( this );
605
606 select_node node;
607 active_thread()->link_node = (void *)&node;
608 unlock( lock );
609
610 pre_park_then_park( pp_fn, pp_datum );
611
612 return ret;
613}
614
615static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }
616
617// waituntil() support
618static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
619 lock( lock __cfaabi_dbg_ctx2 );
620
621 // check if we can complete the operation; if so, race to establish the winner in the special OR case
622 if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
623 if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
624 unlock( lock );
625 return false;
626 }
627 }
628
629 if ( owner == active_thread() ) {
630 recursion_count++;
631 if ( node.park_counter ) __make_select_node_available( node );
632 unlock( lock );
633 return true;
634 }
635
636 if ( owner != 0p ) {
637 insert_last( blocked_threads, node );
638 unlock( lock );
639 return false;
640 }
641
642 owner = active_thread();
643 recursion_count = 1;
644
645 if ( node.park_counter ) __make_select_node_available( node );
646 unlock( lock );
647 return true;
648}
649
650static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
651 lock( lock __cfaabi_dbg_ctx2 );
652 if ( isListed( node ) ) {
653 remove( node );
654 unlock( lock );
655 return false;
656 }
657
658 if ( owner == active_thread() ) {
659 recursion_count--;
660 if ( recursion_count == 0 ) {
661 pop_node( this );
662 }
663 }
664 unlock( lock );
665 return false;
666}
667
668static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
669__CFA_SELECT_GET_TYPE( simple_owner_lock );
670
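// Usage sketch: simple_owner_lock records its owner, so the owning thread may acquire it
// recursively and must release it the same number of times:
//
//   simple_owner_lock m;
//   lock( m );                          // owner = this thread, recursion_count = 1
//   lock( m );                          // recursive acquisition, recursion_count = 2
//   unlock( m );                        // still held
//   unlock( m );                        // count reaches 0: released or handed to a waiter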
671//-----------------------------------------------------------------------------
672// Spin Queue Lock
673
674// - No reacquire for cond var
675// - No recursive acquisition
676// - No ownership
677// - spin lock with no locking/atomics in unlock
678struct spin_queue_lock {
679 // Spin lock used for mutual exclusion
680 mcs_spin_lock lock;
681
682 // flag showing if lock is held
683 volatile bool held;
684};
685
686static inline void ?{}( spin_queue_lock & this ) with( this ) {
687 lock{};
688 held = false;
689}
690static inline void ^?{}( spin_queue_lock & this ) {}
691static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
692static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
693
694// if this is called recursively IT WILL DEADLOCK!
695static inline void lock( spin_queue_lock & this ) with( this ) {
696 mcs_spin_node node;
697 lock( lock, node );
698 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
699 __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
700 unlock( lock, node );
701}
702
703static inline void unlock( spin_queue_lock & this ) with( this ) {
704 __atomic_store_n( &held, false, __ATOMIC_RELEASE );
705}
706
707DEFAULT_ON_NOTIFY( spin_queue_lock )
708DEFAULT_ON_WAIT( spin_queue_lock )
709DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
710
711//-----------------------------------------------------------------------------
712// MCS Block Spin Lock
713
714// - No reacquire for cond var
715// - No recursive acquisition
716// - No ownership
717// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block)
718struct mcs_block_spin_lock {
719 // Spin lock used for mutual exclusion
720 mcs_lock lock;
721
722 // flag showing if lock is held
723 volatile bool held;
724};
725
726static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
727 lock{};
728 held = false;
729}
730static inline void ^?{}( mcs_block_spin_lock & this ) {}
731static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
732static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
733
734// if this is called recursively IT WILL DEADLOCK!!!!!
735static inline void lock( mcs_block_spin_lock & this ) with( this ) {
736 mcs_node node;
737 lock( lock, node );
738 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
739 __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
740 unlock( lock, node );
741}
742
743static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
744 __atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
745}
746
747DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
748DEFAULT_ON_WAIT( mcs_block_spin_lock )
749DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
750
751//-----------------------------------------------------------------------------
752// Block Spin Lock
753
754// - No reacquire for cond var
755// - No recursive acquisition
756// - No ownership
757// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block)
758struct block_spin_lock {
759 // Spin lock used for mutual exclusion
760 fast_block_lock lock;
761
762 // flag showing if lock is held
763 volatile bool held;
764};
765
766static inline void ?{}( block_spin_lock & this ) with( this ) {
767 lock{};
768 held = false;
769}
770static inline void ^?{}( block_spin_lock & this ) {}
771static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
772static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
773
774// if this is called recursively IT WILL DEADLOCK!!!!!
775static inline void lock( block_spin_lock & this ) with( this ) {
776 lock( lock );
777 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
778 __atomic_store_n( &held, true, __ATOMIC_RELEASE );
779 unlock( lock );
780}
781
782static inline void unlock( block_spin_lock & this ) with( this ) {
783 __atomic_store_n( &held, false, __ATOMIC_RELEASE );
784}
785
786static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
787 // first we acquire internal fast_block_lock
788 lock( lock __cfaabi_dbg_ctx2 );
789 if ( held ) { // if internal fast_block_lock is held
790 insert_last( blocked_threads, *t );
791 unlock( lock );
792 return;
793 }
794 // if internal fast_block_lock is not held
795 held = true;
796 unlock( lock );
797
798 unpark( t );
799}
800DEFAULT_ON_WAIT( block_spin_lock )
801static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
802 // now we acquire the entire block_spin_lock upon waking up
803 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
804 __atomic_store_n( &held, true, __ATOMIC_RELEASE );
805 unlock( lock ); // now we release the internal fast_block_lock
806}
807
808//-----------------------------------------------------------------------------
810// info_thread
811// the info thread is a wrapper around a thread used
812// to store extra data for use in the condition variable
812forall( L & | is_blocking_lock( L ) ) {
813 struct info_thread;
814}
815
816//-----------------------------------------------------------------------------
817// Synchronization Locks
818forall( L & | is_blocking_lock( L ) ) {
819
820 //-----------------------------------------------------------------------------
821 // cond_lock
822
823 // The multi-tool condition variable
824 // - wait can take a timeout, unblocking on either a signal or the timeout
825 // - can wait without passing a lock
826 // - can have waiters reacquire different locks while waiting on the same cond var
827 // - has shadow queue
828 // - can be signalled outside of critical sections with no locks held
829 struct cond_lock {
830 // Spin lock used for mutual exclusion
831 __spinlock_t lock;
832
833 // List of blocked threads
834 dlist( info_thread( L ) ) blocked_threads;
835
836 // Count of current blocked threads
837 int count;
838 };
839
840 void ?{}( cond_lock( L ) & this );
841 void ^?{}( cond_lock( L ) & this );
842
843 bool notify_one( cond_lock( L ) & this );
844 bool notify_all( cond_lock( L ) & this );
845
846 uintptr_t front( cond_lock( L ) & this );
847
848 bool empty ( cond_lock( L ) & this );
849 int counter( cond_lock( L ) & this );
850
851 void wait( cond_lock( L ) & this );
852 void wait( cond_lock( L ) & this, uintptr_t info );
853 bool wait( cond_lock( L ) & this, Duration duration );
854 bool wait( cond_lock( L ) & this, uintptr_t info, Duration duration );
855
856 void wait( cond_lock( L ) & this, L & l );
857 void wait( cond_lock( L ) & this, L & l, uintptr_t info );
858 bool wait( cond_lock( L ) & this, L & l, Duration duration );
859 bool wait( cond_lock( L ) & this, L & l, uintptr_t info, Duration duration );
860
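// Usage sketch for cond_lock (assumes the usual monitor protocol: the waiter holds the lock,
// and wait atomically releases it and reacquires it before returning):
//
//   owner_lock m;
//   cond_lock( owner_lock ) c;
//
//   // waiter:                                  // signaller:
//   //   lock( m );                             //   ... make predicate true ...
//   //   while ( ! predicate ) wait( c, m );    //   notify_one( c );   // no lock required
//   //   ... use shared state ...
//   //   unlock( m );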
861 //-----------------------------------------------------------------------------
862 // fast_cond_var
863
864 // The trimmed and slim condition variable
865 // - no internal lock so you must hold a lock while using this cond var
866 // - signalling without holding the branded lock is UNSAFE!
867 // - only allows usage of one lock; the cond var is branded after first use
868
869 struct fast_cond_var {
870 // List of blocked threads
871 dlist( info_thread( L ) ) blocked_threads;
872 #ifdef __CFA_DEBUG__
873 L * lock_used;
874 #endif
875 };
876
877 void ?{}( fast_cond_var( L ) & this );
878 void ^?{}( fast_cond_var( L ) & this );
879
880 bool notify_one( fast_cond_var( L ) & this );
881 bool notify_all( fast_cond_var( L ) & this );
882
883 uintptr_t front( fast_cond_var( L ) & this );
884 bool empty ( fast_cond_var( L ) & this );
885
886 void wait( fast_cond_var( L ) & this, L & l );
887 void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );
888
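// Usage sketch for fast_cond_var (assumption: because there is no internal lock, the branded
// lock must be held for every wait and notify on the same condition variable):
//
//   owner_lock m;
//   fast_cond_var( owner_lock ) c;      // branded to m on first use
//
//   // waiter:   lock( m );  while ( ! predicate ) wait( c, m );  ...  unlock( m );
//   // notifier: lock( m );  ... ;  notify_one( c );  unlock( m );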
889
890 //-----------------------------------------------------------------------------
891 // pthread_cond_var
892 //
893 // - cond var with minimal footprint
894 // - supports operations needed for pthread cond
895
896 struct pthread_cond_var {
897 dlist( info_thread( L ) ) blocked_threads;
898 __spinlock_t lock;
899 };
900
901 void ?{}( pthread_cond_var( L ) & this );
902 void ^?{}( pthread_cond_var( L ) & this );
903
904 bool notify_one( pthread_cond_var( L ) & this );
905 bool notify_all( pthread_cond_var( L ) & this );
906
907 uintptr_t front( pthread_cond_var( L ) & this );
908 bool empty ( pthread_cond_var( L ) & this );
909
910 void wait( pthread_cond_var( L ) & this, L & l );
911 void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
912 bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
913 bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
914}