source: libcfa/src/concurrency/locks.hfa@ b94579a

Last change on this file since b94579a was b94579a, checked in by Peter A. Buhr <pabuhr@…>, 5 days ago

update semaphore lock

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Nov 5 10:27:45 2025
// Update Count : 61
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "collections/lockfree.hfa"
#include "collections/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

// futex headers
#include <linux/futex.h> // Definition of FUTEX_* constants
#include <sys/syscall.h> // Definition of SYS_* constants
#include <unistd.h> // Definition of syscall routine

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & /*| sized( L )*/ )
trait is_blocking_lock {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

	// To set the recursion count after being signalled
	void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
	pp_fn( pp_datum );
	park();
}

// Macros providing default implementations of the is_blocking_lock trait routines for locks that do not wait-morph.

#define DEFAULT_ON_NOTIFY( lock_type ) \
	static inline void on_notify( lock_type & /*this*/, thread$ * t ) { unpark( t ); }

#define DEFAULT_ON_WAIT( lock_type ) \
	static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
		unlock( this ); \
		pre_park_then_park( pp_fn, pp_datum ); \
		return 0; \
	}

// on_wakeup implementation if the lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
	static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }

// on_wakeup implementation if the lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
	static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}


//-----------------------------------------------------------------------------
// Semaphore, counting

struct semaphore {
	// PRIVATE
	ssize_t count$; // - => # waiting threads, 0 => block, + => acquire
	__spinlock_t lock$; // protect blocking-lock critical sections
	__queue_t( thread$ ) waiting$; // waiting threads
};

void ?{}( semaphore & sem, ssize_t count = 1 );
// Return values are used for composition. Calling threads know if a thread is unblocked, which can be useful for
// debugging, performance instrumentation and other metadata tracking purposes.
bool P( semaphore & sem );
static inline bool P( semaphore & sem, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem ); }
bool P( semaphore & sem, semaphore & lock ); // atomic block and release
bool try_P( semaphore & sem );
static inline bool P( semaphore & sem, semaphore & lock, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem, lock ); }
thread$ * V( semaphore & sem, const bool doUnpark = true );
static inline bool V( semaphore & sem ) with( sem ) { return V( sem, true ) != 0p; }
bool V( semaphore & sem, size_t count );
static inline uintptr_t front( semaphore & sem ) with( sem ) { // return shadow information for first waiting thread
	#ifdef __CFA_DEBUG__
	if ( waiting$ ) { // condition queue must not be empty
		abort( "Attempt to access user data on an empty semaphore lock.\n"
			   "Possible cause is not checking if the condition lock is empty before reading stored data." );
	} // if
	#endif // __CFA_DEBUG__
	return waiting$.head->shadow$; // return condition information stored with blocked task
}
static inline ssize_t counter( semaphore & sem ) with( sem ) { return count$; } // - => # waiting threads, 0 => block, + => acquire
//static inline int ?!=?( semaphore & sem, zero_t ) with( sem ) { return count$ != 0; } // empty waiting queue
static inline bool empty( semaphore & sem ) with( sem ) { return count$ == 0; } // empty waiting queue

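// Illustrative usage sketch (not part of the interface, assuming a zero-initialized semaphore): a
// consumer thread Ps the semaphore and a producer Vs it. Names such as "worker" are hypothetical.
//
//   semaphore items = { 0 };							// no permits initially, so P blocks
//
//   thread worker {};
//   void main( worker & ) {
//       P( items );									// block until a permit is released
//       // ... consume ...
//   }
//
//   int main() {
//       worker w;										// start the consumer
//       V( items );									// release one permit, unblocking the consumer
//   }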

//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( single_acquisition_lock );

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
static inline void ^?{}( owner_lock & this ) {}
static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( owner_lock );

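// Illustrative sketch (not part of the interface): owner_lock supports recursive acquisition by the
// owning thread, so a routine may relock a lock it already holds. "process" is a hypothetical helper.
//
//   owner_lock l;
//
//   void process( int depth ) {
//       lock( l );										// first call acquires, nested calls bump the count
//       if ( depth > 0 ) process( depth - 1 );
//       unlock( l );									// released only when the count returns to zero
//   }
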
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}( mcs_node & this ) { this.next = 0p; }

static inline mcs_node * volatile & next( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue( mcs_node ) queue;
};

static inline void lock( mcs_lock & l, mcs_node & n ) {
	if ( push( l.queue, &n ) )
		wait( n.sem );
}

static inline void unlock( mcs_lock & l, mcs_node & n ) {
	mcs_node * nxt = advance( l.queue, &n );
	if ( nxt ) post( nxt->sem );
}

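// Illustrative sketch (not part of the interface): each acquisition supplies its own queue node,
// normally on the caller's stack, and the same node must be passed to the matching unlock.
//
//   mcs_lock shared;
//
//   void critical() {
//       mcs_node n;									// per-acquisition node lives on this stack frame
//       lock( shared, n );
//       // ... critical section ...
//       unlock( shared, n );							// must use the node given to lock
//   }
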
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
	n.locked = true;

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
	if ( prev_val == 0p ) return;
	prev_val->next = &n;

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif
}

static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	mcs_spin_node * n_ptr = &n;
	if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
	while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	n.next->locked = false;
}

//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (recursive acquisition will deadlock)
// - No reacquisition on wakeup
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex( int * uaddr, int futex_op, int val ) {
	return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
}

static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
	return __atomic_compare_exchange_n( (int *)&val, (int *)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
}

static inline int internal_exchange( futex_mutex & this ) with( this ) {
	return __atomic_exchange_n( (int *)&val, 2, __ATOMIC_ACQUIRE );
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with( this ) {
	int state;

	for ( spin; 4 ~ 1024 ~ spin ) {
		state = 0;
		// if unlocked, lock and return
		if ( internal_try_lock( this, state ) ) return;
		if ( state == 2 ) break;
		for ( spin ) Pause();
	}

	// if not in contended state, set to be in contended state
	if ( state != 2 ) state = internal_exchange( this );

	// block and spin until we win the lock
	while ( state != 0 ) {
		futex( (int *)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange( this );
	}
}

static inline void unlock( futex_mutex & this ) with( this ) {
	// if uncontended do atomic unlock and then return
	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

	// otherwise threads are blocked so we must wake one
	futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

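// Illustrative sketch (not part of the interface): futex_mutex is a simple non-owner mutex, so it
// must not be acquired recursively. "counter" and "increment" are hypothetical names.
//
//   futex_mutex m;
//   int counter = 0;
//
//   void increment() {
//       lock( m );										// uncontended path is a single atomic CAS
//       counter += 1;
//       unlock( m );									// wakes one waiter only if the lock was contended
//   }
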
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (recursive acquisition will deadlock)
// - Golang's flavour of mutex
// - Implementation taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

	// stores a lock state
	int val;
};
static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
	return __atomic_compare_exchange_n( (int *)&val, (int *)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
}

static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
	return __atomic_exchange_n( (int *)&val, swap, __ATOMIC_ACQUIRE );
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
	int state, init_state;

	// speculative grab
	state = internal_exchange( this, 1 );
	if ( ! state ) return; // state == 0
	init_state = state;
	for () {
		for ( 4 ) {
			while ( ! val ) { // lock unlocked
				state = 0;
				if ( internal_try_lock( this, state, init_state ) ) return;
			}
			for ( 30 ) Pause();
		}

		while ( ! val ) { // lock unlocked
			state = 0;
			if ( internal_try_lock( this, state, init_state ) ) return;
		}
		sched_yield();

		// if not in contended state, set to be in contended state
		state = internal_exchange( this, 2 );
		if ( ! state ) return; // state == 0
		init_state = 2;
		futex( (int *)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
	}
}

static inline void unlock( go_mutex & this ) with( this ) {
	// if uncontended do atomic unlock and then return
	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

	// otherwise threads are blocked so we must wake one
	futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void ^?{}( exp_backoff_then_block_lock & this ) {}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
	return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
	return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
	size_t compare_val = 0;
	int spin = 4;

	// exponential backoff
	for () {
		compare_val = 0;
		if ( internal_try_lock( this, compare_val ) ) return;
		if ( compare_val == 2 ) break;
		for ( spin ) Pause();
		if ( spin >= 1024 ) break;
		spin += spin;
	}

	if ( 2 != compare_val && try_lock_contention( this ) ) return;
	// block until signalled
	while ( block( this ) ) if ( try_lock_contention( this ) ) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
	if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &remove_first( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

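// Illustrative sketch (not part of the interface): try_lock allows opportunistic acquisition without
// blocking. "render" is a hypothetical helper.
//
//   exp_backoff_then_block_lock l;
//
//   void render() {
//       if ( try_lock( l ) ) {							// acquire only if currently free
//           // ... fast path under the lock ...
//           unlock( l );
//       } // else skip the work this round rather than block
//   }
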
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with( this ) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &remove_first( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( select_node ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with( this ) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with( this ) {
	if ( owner == active_thread() ) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if ( owner != 0p ) {
		select_node node;
		insert_last( blocked_threads, node );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with( this ) {
	__handle_waituntil_OR( blocked_threads );
	select_node * node = &remove_first( blocked_threads );
	if ( node ) {
		owner = node->blocked_thread;
		recursion_count = 1;
		// if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
		wake_one( blocked_threads, *node );
	} else {
		owner = 0p;
		recursion_count = 0;
	}
}

static inline void unlock( simple_owner_lock & this ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// decrement the recursion count; when it reaches zero, release the lock and hand it to a waiter if one exists
	recursion_count--;
	if ( recursion_count == 0 ) {
		pop_node( this );
	}
	unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *(select_node *)t->link_node );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	pop_node( this );

	select_node node;
	active_thread()->link_node = (void *)&node;
	unlock( lock );

	pre_park_then_park( pp_fn, pp_datum );

	return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );

	// check if we can complete operation. If so race to establish winner in special OR case
	if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
		if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
			unlock( lock );
			return false;
		}
	}

	if ( owner == active_thread() ) {
		recursion_count++;
		if ( node.park_counter ) __make_select_node_available( node );
		unlock( lock );
		return true;
	}

	if ( owner != 0p ) {
		insert_last( blocked_threads, node );
		unlock( lock );
		return false;
	}

	owner = active_thread();
	recursion_count = 1;

	if ( node.park_counter ) __make_select_node_available( node );
	unlock( lock );
	return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( isListed( node ) ) {
		remove( node );
		unlock( lock );
		return false;
	}

	if ( owner == active_thread() ) {
		recursion_count--;
		if ( recursion_count == 0 ) {
			pop_node( this );
		}
	}
	unlock( lock );
	return false;
}

static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
__CFA_SELECT_GET_TYPE( simple_owner_lock );

//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with( this ) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with( this ) {
	mcs_spin_node node;
	lock( lock, node );
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
	unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with( this ) {
	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but threads other than the first block)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with( this ) {
	mcs_node node;
	lock( lock, node );
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
	unlock( lock, node );
}

static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
	__atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but threads other than the first block)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with( this ) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with( this ) {
	lock( lock );
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
	unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with( this ) {
	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
	// first we acquire internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// if internal fast_block_lock is not held
	held = true;
	unlock( lock );

	unpark( t );
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
	// now we acquire the entire block_spin_lock upon waking up
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
	unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall( L & | is_blocking_lock( L ) ) {
	struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall( L & | is_blocking_lock( L ) ) {

	//-----------------------------------------------------------------------------
	// cond_lock

	// The multi-tool condition variable
	// - wait can take a timeout, so a waiter wakes on either a signal or the timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct cond_lock {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread( L ) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void ?{}( cond_lock( L ) & this );
	void ^?{}( cond_lock( L ) & this );

	bool notify_one( cond_lock( L ) & this );
	bool notify_all( cond_lock( L ) & this );

	uintptr_t front( cond_lock( L ) & this );

	bool empty ( cond_lock( L ) & this );
	int counter( cond_lock( L ) & this );

	void wait( cond_lock( L ) & this );
	void wait( cond_lock( L ) & this, uintptr_t info );
	bool wait( cond_lock( L ) & this, Duration duration );
	bool wait( cond_lock( L ) & this, uintptr_t info, Duration duration );

	void wait( cond_lock( L ) & this, L & l );
	void wait( cond_lock( L ) & this, L & l, uintptr_t info );
	bool wait( cond_lock( L ) & this, L & l, Duration duration );
	bool wait( cond_lock( L ) & this, L & l, uintptr_t info, Duration duration );

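	// Illustrative sketch (not part of the interface): the usual monitor-style pattern, waiting on a
	// condition while holding a lock and being signalled by another thread. Names are hypothetical.
	//
	//   single_acquisition_lock m;
	//   cond_lock( single_acquisition_lock ) cv;
	//   bool ready = false;
	//
	//   void consumer() {
	//       lock( m );
	//       while ( ! ready ) wait( cv, m );			// releases m and blocks; m is held again when wait returns
	//       unlock( m );
	//   }
	//
	//   void producer() {
	//       lock( m );
	//       ready = true;
	//       unlock( m );
	//       notify_one( cv );							// may be called with no locks held
	//   }
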
	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock, so a lock must be held while using this cond var
	// - signalling without holding the branded lock is UNSAFE!
	// - only allows use with one lock; the cond var is branded to that lock after first use

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread( L ) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var( L ) & this );
	void ^?{}( fast_cond_var( L ) & this );

	bool notify_one( fast_cond_var( L ) & this );
	bool notify_all( fast_cond_var( L ) & this );

	uintptr_t front( fast_cond_var( L ) & this );
	bool empty ( fast_cond_var( L ) & this );

	void wait( fast_cond_var( L ) & this, L & l );
	void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread( L ) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var( L ) & this );
	void ^?{}( pthread_cond_var( L ) & this );

	bool notify_one( pthread_cond_var( L ) & this );
	bool notify_all( pthread_cond_var( L ) & this );

	uintptr_t front( pthread_cond_var( L ) & this );
	bool empty ( pthread_cond_var( L ) & this );

	void wait( pthread_cond_var( L ) & this, L & l );
	void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
	bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
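
	// Illustrative sketch (not part of the interface): a timed wait in the style of
	// pthread_cond_timedwait; the timespec is assumed to be an absolute wakeup time and the boolean
	// result is assumed to distinguish a notify from a timeout. Names are hypothetical.
	//
	//   single_acquisition_lock m;
	//   pthread_cond_var( single_acquisition_lock ) cv;
	//
	//   void wait_with_deadline( timespec abstime ) {
	//       lock( m );
	//       bool signalled = wait( cv, m, abstime );	// assumption: false => timed out before a notify
	//       unlock( m );
	//   }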
}