source: libcfa/src/concurrency/locks.hfa@ 822ae48

Last change on this file since 822ae48 was 822ae48, checked in by Peter A. Buhr <pabuhr@…>, 5 days ago

update semaphore lock

1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks that are used with the runtime thread system.
9//
10// Author : Colby Alexander Parsons
11// Created On : Thu Jan 21 19:46:50 2021
12// Last Modified By : Peter A. Buhr
13// Last Modified On : Mon Nov 17 20:48:55 2025
14// Update Count : 63
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "collections/lockfree.hfa"
24#include "collections/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include "select.hfa"
33
34// futex headers
35#include <linux/futex.h> // Definition of FUTEX_* constants
36#include <sys/syscall.h> // Definition of SYS_* constants
37#include <unistd.h> // Definition of syscall routine
38
39typedef void (*__cfa_pre_park)( void * );
40
41static inline void pre_park_noop( void * ) {}
42
43//-----------------------------------------------------------------------------
44// is_blocking_lock
45forall( L & /*| sized( L )*/ )
46trait is_blocking_lock {
47 // For synchronization locks to use when acquiring
48 void on_notify( L &, struct thread$ * );
49
50 // For synchronization locks to use when releasing
51 size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
52
53 // To set the recursion count after getting signalled
54 void on_wakeup( L &, size_t recursion );
55};
56
57static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
58 pp_fn( pp_datum );
59 park();
60}
61
62// macros for default routine impls for is_blocking_lock trait that do not wait-morph
63
64#define DEFAULT_ON_NOTIFY( lock_type ) \
65 static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
66
67#define DEFAULT_ON_WAIT( lock_type ) \
68 static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
69 unlock( this ); \
70 pre_park_then_park( pp_fn, pp_datum ); \
71 return 0; \
72 }
73
74// on_wakeup impl if lock should be reacquired after waking up
75#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
76 static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
77
78// on_wakeup impl if lock will not be reacquired after waking up
79#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
80 static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
81
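// Usage sketch (illustrative only): a hypothetical lock type "my_lock", with lock()/unlock() already
// defined, can satisfy is_blocking_lock without wait morphing by expanding the defaults above:
//
//   DEFAULT_ON_NOTIFY( my_lock )          // on_notify: simply unpark the signalled thread
//   DEFAULT_ON_WAIT( my_lock )            // on_wait: unlock, run the pre-park hook, park, return 0
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )    // on_wakeup: reacquire my_lock after waking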
82
83//-----------------------------------------------------------------------------
84// Semaphore, counting
85
86struct semaphore {
87 ssize_t count$; // - => # waiting threads, 0 => block, + => acquire
88 __spinlock_t lock$; // protect blocking-lock critical sections
89 __queue_t( thread$ ) waiting$; // waiting threads
90};
91
92void ?{}( semaphore & sem, ssize_t count = 1 );
93// Return values are used for composition: the calling thread learns whether another thread was unblocked, which can be
94// useful for debugging, performance instrumentation, and other metadata-tracking purposes.
95bool P( semaphore & sem );
96static inline bool P( semaphore & sem, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem ); }
97bool P( semaphore & sem, semaphore & lock ); // atomic block and release
98bool try_P( semaphore & sem );
99static inline bool P( semaphore & sem, semaphore & lock, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem, lock ); }
100bool V( semaphore & sem );
101size_t V( semaphore & sem, size_t count );
102static inline uintptr_t front( semaphore & sem ) with( sem ) { // return shadow information for first waiting thread
103 #ifdef __CFA_DEBUG__
104 if ( waiting$ ) { // condition queue must not be empty
105 abort( "Attempt to access user data on an empty semaphore lock.\n"
106 "Possible cause is not checking if the condition lock is empty before reading stored data." );
107 } // if
108 #endif // __CFA_DEBUG__
109 return waiting$.head->shadow$; // return condition information stored with blocked task
110}
111static inline ssize_t counter( semaphore & sem ) with( sem ) { return count$; } // - => # waiting threads, 0 => block, + => acquire
112//static inline int ?!=?( semaphore & sem, zero_t ) with( sem ) { return count$ != 0; } // empty waiting queue
113static inline bool empty( semaphore & sem ) with( sem ) { return count$ == 0; } // empty waiting queue
114
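// Usage sketch (illustrative only): a counting semaphore used for signalling, where the waiter attaches
// shadow data that the signaller may inspect with front() before V(). Names prefixed "example_" are
// hypothetical.
//
//   semaphore sem = { 0 };                        // start with no permits (the default count is 1)
//
//   void example_waiter() {
//       P( sem, (uintptr_t)42 );                  // block, storing 42 as shadow information
//   }
//
//   void example_signaller() {
//       if ( counter( sem ) < 0 ) {               // negative count => threads are waiting
//           uintptr_t info = front( sem );        // peek at the first waiter's shadow data
//           (void)info;
//       }
//       V( sem );                                 // unblock one waiter, or bank a permit if none waiting
//   }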
115
116//----------
117struct single_acquisition_lock {
118 inline blocking_lock;
119};
120
121static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
122static inline void ^?{}( single_acquisition_lock & this ) {}
123static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
124static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
125static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
126static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
127static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
128static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
129static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
130static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
131static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
132__CFA_SELECT_GET_TYPE( single_acquisition_lock );
133
134//----------
135struct owner_lock {
136 inline blocking_lock;
137};
138
139static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
140static inline void ^?{}( owner_lock & this ) {}
141static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
142static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
143static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
144static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
145static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
146static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
147static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
148static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
149static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
150__CFA_SELECT_GET_TYPE( owner_lock );
151
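// Usage sketch (illustrative only): both wrappers expose blocking_lock's interface and differ only in
// their constructor flags; owner_lock is multi-acquisition, so its owner may reacquire it, whereas
// single_acquisition_lock may not be reacquired. The routine name "example_critical" is hypothetical.
//
//   single_acquisition_lock sal;                  // non-recursive blocking lock
//   owner_lock ol;                                // recursive, owner-checked blocking lock
//
//   void example_critical() {
//       lock( ol );
//       lock( ol );                               // legal: the owner may acquire again
//       unlock( ol );
//       unlock( ol );                             // released when the acquisition count reaches zero
//
//       if ( try_lock( sal ) ) unlock( sal );     // non-blocking attempt on the single-acquisition lock
//   }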
152//-----------------------------------------------------------------------------
153// MCS Lock
154struct mcs_node {
155 mcs_node * volatile next;
156 single_sem sem;
157};
158
159static inline void ?{}( mcs_node & this ) { this.next = 0p; }
160
161static inline mcs_node * volatile & next( mcs_node * node ) {
162 return node->next;
163}
164
165struct mcs_lock {
166 mcs_queue( mcs_node ) queue;
167};
168
169static inline void lock( mcs_lock & l, mcs_node & n ) {
170 if ( push( l.queue, &n ) )
171 wait( n.sem );
172}
173
174static inline void unlock( mcs_lock & l, mcs_node & n ) {
175 mcs_node * nxt = advance( l.queue, &n );
176 if ( nxt ) post( nxt->sem );
177}
178
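// Usage sketch (illustrative only): each thread supplies its own queue node, normally on its stack, and
// must pass the same node to unlock. The routine name "example_mcs" is hypothetical.
//
//   mcs_lock shared;
//
//   void example_mcs() {
//       mcs_node n;                               // per-acquisition queue node
//       lock( shared, n );                        // enqueue; block on n.sem unless the queue was empty
//       // critical section
//       unlock( shared, n );                      // hand off to the next queued node, if any
//   }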
179//-----------------------------------------------------------------------------
180// MCS Spin Lock
181// - No recursive acquisition
182// - Needs to be released by owner
183
184struct mcs_spin_node {
185 mcs_spin_node * volatile next;
186 volatile bool locked;
187};
188
189struct mcs_spin_queue {
190 mcs_spin_node * volatile tail;
191};
192
193static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }
194
195struct mcs_spin_lock {
196 mcs_spin_queue queue;
197};
198
199static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
200 n.locked = true;
201
202 #if defined( __ARM_ARCH )
203 __asm__ __volatile__ ( "DMB ISH" ::: );
204 #endif
205
206 mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
207 if ( prev_val == 0p ) return;
208 prev_val->next = &n;
209
210 #if defined( __ARM_ARCH )
211 __asm__ __volatile__ ( "DMB ISH" ::: );
212 #endif
213
214 while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();
215
216 #if defined( __ARM_ARCH )
217 __asm__ __volatile__ ( "DMB ISH" ::: );
218 #endif
219}
220
221static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
222 #if defined( __ARM_ARCH )
223 __asm__ __volatile__ ( "DMB ISH" ::: );
224 #endif
225
226 mcs_spin_node * n_ptr = &n;
227 if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
228 while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();
229
230 #if defined( __ARM_ARCH )
231 __asm__ __volatile__ ( "DMB ISH" ::: );
232 #endif
233
234 n.next->locked = false;
235}
236
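// Usage sketch (illustrative only): same hand-off protocol as mcs_lock, but waiters spin on their own
// node rather than blocking. The routine name "example_mcs_spin" is hypothetical.
//
//   mcs_spin_lock shared_spin;
//
//   void example_mcs_spin() {
//       mcs_spin_node n;
//       lock( shared_spin, n );                   // spin on n.locked until the predecessor hands off
//       // critical section
//       unlock( shared_spin, n );                 // clear the successor's locked flag
//   }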
237//-----------------------------------------------------------------------------
238// futex_mutex
239
240// - Kernel thd blocking alternative to the spinlock
241// - No ownership (will deadlock on reacq)
242// - no reacq on wakeup
243struct futex_mutex {
244 // lock state: any state other than UNLOCKED is locked
245 // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
246
247 // stores a lock state
248 int val;
249};
250
251// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
252static inline int futex( int *uaddr, int futex_op, int val ) {
253 return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
254}
255
256static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }
257
258static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
259 return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
260}
261
262static inline int internal_exchange( futex_mutex & this ) with( this ) {
263 return __atomic_exchange_n( (int*)&val, 2, __ATOMIC_ACQUIRE );
264}
265
266// if this is called recursively IT WILL DEADLOCK!!!!!
267static inline void lock( futex_mutex & this ) with( this ) {
268 int state;
269
270 for ( spin; 4 ~ 1024 ~ spin ) {
271 state = 0;
272 // if unlocked, lock and return
273 if ( internal_try_lock( this, state ) ) return;
274 if ( state == 2 ) break;
275 for ( spin ) Pause();
276 }
277
278 // if not in contended state, set to be in contended state
279 if ( state != 2 ) state = internal_exchange( this );
280
281 // block and spin until we win the lock
282 while ( state != 0 ) {
283 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
284 state = internal_exchange( this );
285 }
286}
287
288static inline void unlock( futex_mutex & this ) with( this ) {
289 // if uncontended do atomic unlock and then return
290 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
291
292 // otherwise threads are blocked so we must wake one
293 futex( (int *)&val, FUTEX_WAKE, 1 );
294}
295
296DEFAULT_ON_NOTIFY( futex_mutex )
297DEFAULT_ON_WAIT( futex_mutex )
298DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
299
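// Usage sketch (illustrative only): a plain non-recursive mutex whose contended waiters block in the
// kernel via futex(FUTEX_WAIT). Names prefixed "example_" are hypothetical.
//
//   futex_mutex m;
//   int example_counter = 0;
//
//   void example_add() {
//       lock( m );                                // never call while already holding m (no ownership)
//       example_counter += 1;
//       unlock( m );                              // wakes one waiter only if the lock was contended
//   }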
300//-----------------------------------------------------------------------------
301// go_mutex
302
303// - Kernel thd blocking alternative to the spinlock
304// - No ownership (will deadlock on reacq)
305// - Golang's flavour of mutex
306// - Impl taken from Golang: src/runtime/lock_futex.go
307struct go_mutex {
308 // lock state: any state other than UNLOCKED is locked
309 // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
310
311 // stores a lock state
312 int val;
313};
314static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
315static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
316static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
317
318static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
319 return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
320}
321
322static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
323 return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE );
324}
325
326// if this is called recursively IT WILL DEADLOCK!!!!!
327static inline void lock( go_mutex & this ) with( this ) {
328 int state, init_state;
329
330 // speculative grab
331 state = internal_exchange( this, 1 );
332 if ( ! state ) return; // state == 0
333 init_state = state;
334 for () {
335 for ( 4 ) {
336 while ( ! val ) { // lock unlocked
337 state = 0;
338 if ( internal_try_lock( this, state, init_state ) ) return;
339 }
340 for ( 30 ) Pause();
341 }
342
343 while ( ! val ) { // lock unlocked
344 state = 0;
345 if ( internal_try_lock( this, state, init_state ) ) return;
346 }
347 sched_yield();
348
349 // if not in contended state, set to be in contended state
350 state = internal_exchange( this, 2 );
351 if ( ! state ) return; // state == 0
352 init_state = 2;
353 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
354 }
355}
356
357static inline void unlock( go_mutex & this ) with( this ) {
358 // if uncontended do atomic unlock and then return
359 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
360
361 // otherwise threads are blocked so we must wake one
362 futex( (int *)&val, FUTEX_WAKE, 1 );
363}
364
365DEFAULT_ON_NOTIFY( go_mutex )
366DEFAULT_ON_WAIT( go_mutex )
367DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
368
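// Usage sketch (illustrative only): the interface is identical to futex_mutex; the difference is the
// Go-style adaptive spinning and yielding before sleeping on the futex. The routine name "example_go"
// is hypothetical.
//
//   go_mutex gm;
//
//   void example_go() {
//       lock( gm );                               // spins briefly, sched_yield()s, then futex-waits
//       // critical section
//       unlock( gm );
//   }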
369//-----------------------------------------------------------------------------
370// Exponential backoff then block lock
371struct exp_backoff_then_block_lock {
372 // Spin lock used for mutual exclusion
373 __spinlock_t spinlock;
374
375 // List of blocked threads
376 dlist( thread$ ) blocked_threads;
377
378 // Used for comparing and exchanging
379 volatile size_t lock_value;
380};
381
382static inline void ?{}( exp_backoff_then_block_lock & this ) {
383 this.spinlock{};
384 this.blocked_threads{};
385 this.lock_value = 0;
386}
387static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
388static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
389
390static inline void ^?{}( exp_backoff_then_block_lock & this ){}
391
392static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
393 return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
394}
395
396static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
397
398static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
399 return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
400}
401
402static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
403 lock( spinlock __cfaabi_dbg_ctx2 );
404 if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
405 unlock( spinlock );
406 return true;
407 }
408 insert_last( blocked_threads, *active_thread() );
409 unlock( spinlock );
410 park( );
411 return true;
412}
413
414static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
415 size_t compare_val = 0;
416 int spin = 4;
417
418 // exponential backoff: spin doubles from 4 up to 1024
419 for () {
420 compare_val = 0;
421 if ( internal_try_lock( this, compare_val ) ) return;
422 if ( compare_val == 2 ) break;
423 for ( spin ) Pause();
424 if ( spin >= 1024 ) break;
425 spin += spin;
426 }
427
428 if ( 2 != compare_val && try_lock_contention( this ) ) return;
429 // block until signalled
430 while ( block( this ) ) if ( try_lock_contention( this ) ) return;
431}
432
433static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
434 if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
435 lock( spinlock __cfaabi_dbg_ctx2 );
436 thread$ * t = &remove_first( blocked_threads );
437 unlock( spinlock );
438 unpark( t );
439}
440
441DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
442DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
443DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
444
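// Usage sketch (illustrative only): spins with exponentially growing pauses, then parks on the internal
// list once the lock is marked contended. The routine name "example_ebb" is hypothetical.
//
//   exp_backoff_then_block_lock ebb;
//
//   void example_ebb() {
//       if ( ! try_lock( ebb ) )                  // opportunistic attempt
//           lock( ebb );                          // otherwise back off, then block until unparked
//       // critical section
//       unlock( ebb );
//   }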
445//-----------------------------------------------------------------------------
446// Fast Block Lock
447
448// minimal blocking lock
449// - No reacquire for cond var
450// - No recursive acquisition
451// - No ownership
452struct fast_block_lock {
453 // List of blocked threads
454 dlist( thread$ ) blocked_threads;
455
456 // Spin lock used for mutual exclusion
457 __spinlock_t lock;
458
459 // flag showing if lock is held
460 bool held:1;
461};
462
463static inline void ?{}( fast_block_lock & this ) with( this ) {
464 lock{};
465 blocked_threads{};
466 held = false;
467}
468static inline void ^?{}( fast_block_lock & this ) {}
469static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
470static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
471
472// if this is called recursively IT WILL DEADLOCK!!!!!
473static inline void lock( fast_block_lock & this ) with( this ) {
474 lock( lock __cfaabi_dbg_ctx2 );
475 if ( held ) {
476 insert_last( blocked_threads, *active_thread() );
477 unlock( lock );
478 park( );
479 return;
480 }
481 held = true;
482 unlock( lock );
483}
484
485static inline void unlock( fast_block_lock & this ) with( this ) {
486 lock( lock __cfaabi_dbg_ctx2 );
487 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
488 thread$ * t = &remove_first( blocked_threads );
489 held = ( t ? true : false );
490 unpark( t );
491 unlock( lock );
492}
493
494static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
495 lock( lock __cfaabi_dbg_ctx2 );
496 insert_last( blocked_threads, *t );
497 unlock( lock );
498}
499DEFAULT_ON_WAIT( fast_block_lock )
500DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
501
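// Usage sketch (illustrative only): a minimal FIFO blocking lock; it must not be acquired recursively
// and unlock performs no owner check. The routine name "example_fbl" is hypothetical.
//
//   fast_block_lock fbl;
//
//   void example_fbl() {
//       lock( fbl );                              // parks if the lock is held; FIFO wakeup order
//       // critical section
//       unlock( fbl );                            // hands the lock directly to the first blocked thread
//   }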
502//-----------------------------------------------------------------------------
503// simple_owner_lock
504
505// pthread owner lock
506// - reacquire for cond var
507// - recursive acquisition
508// - ownership
509struct simple_owner_lock {
510 // List of blocked threads
511 dlist( select_node ) blocked_threads;
512
513 // Spin lock used for mutual exclusion
514 __spinlock_t lock;
515
516 // owner showing if lock is held
517 struct thread$ * owner;
518
519 size_t recursion_count;
520};
521
522static inline void ?{}( simple_owner_lock & this ) with( this ) {
523 lock{};
524 blocked_threads{};
525 owner = 0p;
526 recursion_count = 0;
527}
528static inline void ^?{}( simple_owner_lock & this ) {}
529static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
530static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
531
532static inline void lock( simple_owner_lock & this ) with( this ) {
533 if ( owner == active_thread() ) {
534 recursion_count++;
535 return;
536 }
537 lock( lock __cfaabi_dbg_ctx2 );
538
539 if ( owner != 0p ) {
540 select_node node;
541 insert_last( blocked_threads, node );
542 unlock( lock );
543 park( );
544 return;
545 }
546 owner = active_thread();
547 recursion_count = 1;
548 unlock( lock );
549}
550
551static inline void pop_node( simple_owner_lock & this ) with( this ) {
552 __handle_waituntil_OR( blocked_threads );
553 select_node * node = &remove_first( blocked_threads );
554 if ( node ) {
555 owner = node->blocked_thread;
556 recursion_count = 1;
557 // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
558 wake_one( blocked_threads, *node );
559 } else {
560 owner = 0p;
561 recursion_count = 0;
562 }
563}
564
565static inline void unlock( simple_owner_lock & this ) with( this ) {
566 lock( lock __cfaabi_dbg_ctx2 );
567 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
568 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
569 // decrement recursion count; when it reaches zero, release the lock and set a new owner if one is waiting
570 recursion_count--;
571 if ( recursion_count == 0 ) {
572 pop_node( this );
573 }
574 unlock( lock );
575}
576
577static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
578 lock( lock __cfaabi_dbg_ctx2 );
579 // lock held
580 if ( owner != 0p ) {
581 insert_last( blocked_threads, *(select_node *)t->link_node );
582 }
583 // lock not held
584 else {
585 owner = t;
586 recursion_count = 1;
587 unpark( t );
588 }
589 unlock( lock );
590}
591
592static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
593 lock( lock __cfaabi_dbg_ctx2 );
594 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
595 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
596
597 size_t ret = recursion_count;
598
599 pop_node( this );
600
601 select_node node;
602 active_thread()->link_node = (void *)&node;
603 unlock( lock );
604
605 pre_park_then_park( pp_fn, pp_datum );
606
607 return ret;
608}
609
610static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }
611
612// waituntil() support
613static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
614 lock( lock __cfaabi_dbg_ctx2 );
615
616 // check if we can complete the operation; if so, race to establish the winner in the special OR case
617 if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
618 if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
619 unlock( lock );
620 return false;
621 }
622 }
623
624 if ( owner == active_thread() ) {
625 recursion_count++;
626 if ( node.park_counter ) __make_select_node_available( node );
627 unlock( lock );
628 return true;
629 }
630
631 if ( owner != 0p ) {
632 insert_last( blocked_threads, node );
633 unlock( lock );
634 return false;
635 }
636
637 owner = active_thread();
638 recursion_count = 1;
639
640 if ( node.park_counter ) __make_select_node_available( node );
641 unlock( lock );
642 return true;
643}
644
645static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
646 lock( lock __cfaabi_dbg_ctx2 );
647 if ( isListed( node ) ) {
648 remove( node );
649 unlock( lock );
650 return false;
651 }
652
653 if ( owner == active_thread() ) {
654 recursion_count--;
655 if ( recursion_count == 0 ) {
656 pop_node( this );
657 }
658 }
659 unlock( lock );
660 return false;
661}
662
663static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
664__CFA_SELECT_GET_TYPE( simple_owner_lock );
665
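// Usage sketch (illustrative only): a pthread-style owner lock; the owning thread may reacquire, and the
// lock is released only when the recursion count returns to zero. The routine name "example_sol" is
// hypothetical.
//
//   simple_owner_lock sol;
//
//   void example_sol() {
//       lock( sol );                              // recursion_count == 1
//       lock( sol );                              // recursion_count == 2 (same owner)
//       unlock( sol );                            // recursion_count == 1, still owned
//       unlock( sol );                            // released; the next waiter, if any, becomes owner
//   }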
666//-----------------------------------------------------------------------------
667// Spin Queue Lock
668
669// - No reacquire for cond var
670// - No recursive acquisition
671// - No ownership
672// - spin lock with no locking or atomic RMW in unlock (only a release store)
673struct spin_queue_lock {
674 // Spin lock used for mutual exclusion
675 mcs_spin_lock lock;
676
677 // flag showing if lock is held
678 volatile bool held;
679};
680
681static inline void ?{}( spin_queue_lock & this ) with( this ) {
682 lock{};
683 held = false;
684}
685static inline void ^?{}( spin_queue_lock & this ) {}
686static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
687static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
688
689// if this is called recursively IT WILL DEADLOCK!
690static inline void lock( spin_queue_lock & this ) with( this ) {
691 mcs_spin_node node;
692 lock( lock, node );
693 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
694 __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
695 unlock( lock, node );
696}
697
698static inline void unlock( spin_queue_lock & this ) with( this ) {
699 __atomic_store_n( &held, false, __ATOMIC_RELEASE );
700}
701
702DEFAULT_ON_NOTIFY( spin_queue_lock )
703DEFAULT_ON_WAIT( spin_queue_lock )
704DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
705
706//-----------------------------------------------------------------------------
707// MCS Block Spin Lock
708
709// - No reacquire for cond var
710// - No recursive acquisition
711// - No ownership
712// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
713struct mcs_block_spin_lock {
714 // Spin lock used for mutual exclusion
715 mcs_lock lock;
716
717 // flag showing if lock is held
718 volatile bool held;
719};
720
721static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
722 lock{};
723 held = false;
724}
725static inline void ^?{}( mcs_block_spin_lock & this ) {}
726static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
727static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
728
729// if this is called recursively IT WILL DEADLOCK!!!!!
730static inline void lock( mcs_block_spin_lock & this ) with( this ) {
731 mcs_node node;
732 lock( lock, node );
733 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
734 __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
735 unlock( lock, node );
736}
737
738static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
739 __atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
740}
741
742DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
743DEFAULT_ON_WAIT( mcs_block_spin_lock )
744DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
745
746//-----------------------------------------------------------------------------
747// Block Spin Lock
748
749// - No reacquire for cond var
750// - No recursive acquisition
751// - No ownership
752// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
753struct block_spin_lock {
754 // Spin lock used for mutual exclusion
755 fast_block_lock lock;
756
757 // flag showing if lock is held
758 volatile bool held;
759};
760
761static inline void ?{}( block_spin_lock & this ) with( this ) {
762 lock{};
763 held = false;
764}
765static inline void ^?{}( block_spin_lock & this ) {}
766static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
767static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
768
769// if this is called recursively IT WILL DEADLOCK!!!!!
770static inline void lock( block_spin_lock & this ) with( this ) {
771 lock( lock );
772 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
773 __atomic_store_n( &held, true, __ATOMIC_RELEASE );
774 unlock( lock );
775}
776
777static inline void unlock( block_spin_lock & this ) with( this ) {
778 __atomic_store_n( &held, false, __ATOMIC_RELEASE );
779}
780
781static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
782 // first we acquire internal fast_block_lock
783 lock( lock __cfaabi_dbg_ctx2 );
784 if ( held ) { // if internal fast_block_lock is held
785 insert_last( blocked_threads, *t );
786 unlock( lock );
787 return;
788 }
789 // if internal fast_block_lock is not held
790 held = true;
791 unlock( lock );
792
793 unpark( t );
794}
795DEFAULT_ON_WAIT( block_spin_lock )
796static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
797 // now we acquire the entire block_spin_lock upon waking up
798 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
799 __atomic_store_n( &held, true, __ATOMIC_RELEASE );
800 unlock( lock ); // Now we release the internal fast_block_lock
801}
802
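// Usage sketch (illustrative only): spin_queue_lock, mcs_block_spin_lock, and block_spin_lock all expose
// the same non-recursive lock()/unlock() interface; they differ only in how waiters queue internally.
// The routine name "example_sq" is hypothetical.
//
//   spin_queue_lock sq;                           // mcs_block_spin_lock / block_spin_lock are used identically
//
//   void example_sq() {
//       lock( sq );                               // queue on the internal lock, then spin until held clears
//       // critical section
//       unlock( sq );                             // release store on held; the spinning thread proceeds
//   }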
803//-----------------------------------------------------------------------------
804// info_thread
805// the info thread is a wrapper around a thread used
806// to store extra data for use in the condition variable
807forall( L & | is_blocking_lock( L ) ) {
808 struct info_thread;
809}
810
811//-----------------------------------------------------------------------------
812// Synchronization Locks
813forall( L & | is_blocking_lock( L ) ) {
814
815 //-----------------------------------------------------------------------------
816 // cond_lock
817
818 // The multi-tool condition variable
819 // - can pass timeouts to wait for either a signal or timeout
820 // - can wait without passing a lock
821 // - can have waiters reacquire different locks while waiting on the same cond var
822 // - has shadow queue
823 // - can be signalled outside of critical sections with no locks held
824 struct cond_lock {
825 // Spin lock used for mutual exclusion
826 __spinlock_t lock;
827
828 // List of blocked threads
829 dlist( info_thread( L ) ) blocked_threads;
830
831 // Count of current blocked threads
832 int count;
833 };
834
835 void ?{}( cond_lock( L ) & this );
836 void ^?{}( cond_lock( L ) & this );
837
838 bool notify_one( cond_lock( L ) & this );
839 bool notify_all( cond_lock( L ) & this );
840
841 uintptr_t front( cond_lock( L ) & this );
842
843 bool empty ( cond_lock( L ) & this );
844 int counter( cond_lock( L ) & this );
845
846 void wait( cond_lock( L ) & this );
847 void wait( cond_lock( L ) & this, uintptr_t info );
848 bool wait( cond_lock( L ) & this, Duration duration );
849 bool wait( cond_lock( L ) & this, uintptr_t info, Duration duration );
850
851 void wait( cond_lock( L ) & this, L & l );
852 void wait( cond_lock( L ) & this, L & l, uintptr_t info );
853 bool wait( cond_lock( L ) & this, L & l, Duration duration );
854 bool wait( cond_lock( L ) & this, L & l, uintptr_t info, Duration duration );
855
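// Usage sketch (illustrative only): cond_lock pairs with any is_blocking_lock type; wait() releases the
// lock, blocks, and holds the lock again on return, while notification may be issued with no locks held.
// Names prefixed "example_" are hypothetical.
//
//   single_acquisition_lock m;
//   cond_lock( single_acquisition_lock ) c;
//
//   void example_waiter() {
//       lock( m );
//       wait( c, m, (uintptr_t)7 );               // release m, block, storing 7 as shadow information
//       unlock( m );                              // m is held again after wait returns
//   }
//
//   void example_signaller() {
//       notify_one( c );                          // legal outside any critical section for this cond var
//   }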
856 //-----------------------------------------------------------------------------
857 // fast_cond_var
858
859 // The trimmed and slim condition variable
860 // - no internal lock so you must hold a lock while using this cond var
861 // - signalling without holding branded lock is UNSAFE!
862 // - only allows usage of one lock; the cond var is branded by the first lock used with it
863
864 struct fast_cond_var {
865 // List of blocked threads
866 dlist( info_thread( L ) ) blocked_threads;
867 #ifdef __CFA_DEBUG__
868 L * lock_used;
869 #endif
870 };
871
872 void ?{}( fast_cond_var( L ) & this );
873 void ^?{}( fast_cond_var( L ) & this );
874
875 bool notify_one( fast_cond_var( L ) & this );
876 bool notify_all( fast_cond_var( L ) & this );
877
878 uintptr_t front( fast_cond_var( L ) & this );
879 bool empty ( fast_cond_var( L ) & this );
880
881 void wait( fast_cond_var( L ) & this, L & l );
882 void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );
883
884
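// Usage sketch (illustrative only, assuming a wait-morphing blocking lock such as
// single_acquisition_lock): the first lock used brands the condition variable, and that lock must be
// held when signalling. Names prefixed "example_" are hypothetical.
//
//   single_acquisition_lock fm;
//   fast_cond_var( single_acquisition_lock ) fc;
//
//   void example_fwait() {
//       lock( fm );
//       wait( fc, fm );                           // release fm and block; fm is handed back on wakeup
//       unlock( fm );
//   }
//
//   void example_fnotify() {
//       lock( fm );                               // the branded lock must be held to signal
//       notify_one( fc );
//       unlock( fm );
//   }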
885 //-----------------------------------------------------------------------------
886 // pthread_cond_var
887 //
888 // - cond var with minimal footprint
890 // - supports operations needed for pthread cond
890
891 struct pthread_cond_var {
892 dlist( info_thread( L ) ) blocked_threads;
893 __spinlock_t lock;
894 };
895
896 void ?{}( pthread_cond_var( L ) & this );
897 void ^?{}( pthread_cond_var( L ) & this );
898
899 bool notify_one( pthread_cond_var( L ) & this );
900 bool notify_all( pthread_cond_var( L ) & this );
901
902 uintptr_t front( pthread_cond_var( L ) & this );
903 bool empty ( pthread_cond_var( L ) & this );
904
905 void wait( pthread_cond_var( L ) & this, L & l );
906 void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
907 bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
908 bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
909}
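// Usage sketch (illustrative only): a pthread-style timed wait, assuming the timespec is an absolute
// deadline as with pthread_cond_timedwait and that the bool result is false on timeout. The routine
// name "example_timed_wait" is hypothetical; clock_gettime/CLOCK_REALTIME come from <time.h>.
//
//   simple_owner_lock pm;
//   pthread_cond_var( simple_owner_lock ) pc;
//
//   bool example_timed_wait() {
//       lock( pm );
//       timespec deadline;
//       clock_gettime( CLOCK_REALTIME, &deadline );
//       deadline.tv_sec += 1;                     // wait at most roughly one second
//       bool signalled = wait( pc, pm, deadline );
//       unlock( pm );
//       return signalled;
//   }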