source: libcfa/src/concurrency/locks.hfa@ 86b418f

Last change on this file since 86b418f was 822ae48, checked in by Peter A. Buhr <pabuhr@…>, 5 days ago

update semaphore lock

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By : Peter A. Buhr
// Last Modified On : Mon Nov 17 20:48:55 2025
// Update Count     : 63
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "collections/lockfree.hfa"
#include "collections/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

// futex headers
#include <linux/futex.h>							// Definition of FUTEX_* constants
#include <sys/syscall.h>							// Definition of SYS_* constants
#include <unistd.h>									// Definition of syscall routine

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & /*| sized( L )*/ )
trait is_blocking_lock {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

	// to set the recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
	pp_fn( pp_datum );
	park();
}

// Macros providing default routine implementations for the is_blocking_lock trait that do not wait-morph.

#define DEFAULT_ON_NOTIFY( lock_type ) \
	static inline void on_notify( lock_type & /*this*/, thread$ * t ) { unpark( t ); }

#define DEFAULT_ON_WAIT( lock_type ) \
	static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
		unlock( this ); \
		pre_park_then_park( pp_fn, pp_datum ); \
		return 0; \
	}

// on_wakeup impl if lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
	static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }

// on_wakeup impl if lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
	static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
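
// Example (sketch): a new lock type with plain lock()/unlock() can satisfy
// is_blocking_lock by expanding the three macros above; `my_lock` is a
// hypothetical type, mirroring how futex_mutex and friends do it below.
//
//   struct my_lock { ... };
//   static inline void lock( my_lock & );
//   static inline void unlock( my_lock & );
//
//   DEFAULT_ON_NOTIFY( my_lock )				// notify: simply unpark the waiter
//   DEFAULT_ON_WAIT( my_lock )					// wait: unlock, run the pre-park hook, park
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )			// wakeup: reacquire the lock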


//-----------------------------------------------------------------------------
// Semaphore, counting

struct semaphore {
	ssize_t count$;									// - => # waiting threads, 0 => block, + => acquire
	__spinlock_t lock$;								// protect blocking-lock critical sections
	__queue_t( thread$ ) waiting$;					// waiting threads
};

void ?{}( semaphore & sem, ssize_t count = 1 );
// Return values are used for composition. Calling threads know if a thread is unblocked, which can be useful for
// debugging, performance instrumentation and other metadata tracking purposes.
bool P( semaphore & sem );
static inline bool P( semaphore & sem, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem ); }
bool P( semaphore & sem, semaphore & lock );		// atomic block and release
bool try_P( semaphore & sem );
static inline bool P( semaphore & sem, semaphore & lock, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem, lock ); }
bool V( semaphore & sem );
size_t V( semaphore & sem, size_t count );
static inline uintptr_t front( semaphore & sem ) with( sem ) { // return shadow information for first waiting thread
	#ifdef __CFA_DEBUG__
	if ( ! waiting$ ) {								// condition queue must not be empty
		abort( "Attempt to access user data on an empty semaphore lock.\n"
			   "Possible cause is not checking if the condition lock is empty before reading stored data." );
	} // if
	#endif // __CFA_DEBUG__
	return waiting$.head->shadow$;					// return condition information stored with blocked task
}
static inline ssize_t counter( semaphore & sem ) with( sem ) { return count$; } // - => # waiting threads, 0 => block, + => acquire
//static inline int ?!=?( semaphore & sem, zero_t ) with( sem ) { return count$ != 0; } // empty waiting queue
static inline bool empty( semaphore & sem ) with( sem ) { return count$ == 0; } // empty waiting queue
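
// Example (sketch): counting semaphore used for a simple hand-off;
// `worker` is a hypothetical thread type.
//
//   semaphore sem = { 0 };						// start with nothing available
//   thread worker {};
//   void main( worker & ) {
//       P( sem );								// block until a V arrives
//       // ... consume ...
//   }
//   // producer side:
//   //   V( sem );								// unblock one waiter (returns true if a thread was unblocked)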


//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( single_acquisition_lock );

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
static inline void ^?{}( owner_lock & this ) {}
static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( owner_lock );
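
// Example (sketch): owner_lock allows the owning thread to acquire recursively,
// while single_acquisition_lock does not; both otherwise share the same interface.
//
//   owner_lock m;
//   void deposit() {
//       lock( m );
//       // ... may call another routine that also locks m ...
//       unlock( m );
//   }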

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}( mcs_node & this ) { this.next = 0p; }

static inline mcs_node * volatile & next( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue( mcs_node ) queue;
};

static inline void lock( mcs_lock & l, mcs_node & n ) {
	if ( push( l.queue, &n ) )
		wait( n.sem );
}

static inline void unlock( mcs_lock & l, mcs_node & n ) {
	mcs_node * nxt = advance( l.queue, &n );
	if ( nxt ) post( nxt->sem );
}
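
// Example (sketch): MCS locks thread a per-acquisition node through the queue,
// so the same node must be passed to the matching unlock.
//
//   mcs_lock l;
//   void critical() {
//       mcs_node n;							// node lives on the acquirer's stack
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );
//   }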

//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
	n.locked = true;

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
	if ( prev_val == 0p ) return;
	prev_val->next = &n;

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif
}

static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	mcs_spin_node * n_ptr = &n;
	if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
	while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();

	#if defined( __ARM_ARCH )
	__asm__ __volatile__ ( "DMB ISH" ::: );
	#endif

	n.next->locked = false;
}
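
// Example (sketch): usage mirrors mcs_lock, but waiters spin on their own node
// instead of blocking on a semaphore.
//
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   // ... short critical section ...
//   unlock( l, n );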

//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - no reacquisition on wakeup
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex( int * uaddr, int futex_op, int val ) {
	return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
}

static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
}

static inline int internal_exchange( futex_mutex & this ) with( this ) {
	return __atomic_exchange_n( (int*)&val, 2, __ATOMIC_ACQUIRE );
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with( this ) {
	int state;

	for ( spin; 4 ~ 1024 ~ spin ) {
		state = 0;
		// if unlocked, lock and return
		if ( internal_try_lock( this, state ) ) return;
		if ( state == 2 ) break;
		for ( spin ) Pause();
	}

	// if not in contended state, set to be in contended state
	if ( state != 2 ) state = internal_exchange( this );

	// block and spin until we win the lock
	while ( state != 0 ) {
		futex( (int*)&val, FUTEX_WAIT, 2 );			// if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange( this );
	}
}

static inline void unlock( futex_mutex & this ) with( this ) {
	// if uncontended do atomic unlock and then return
	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

	// otherwise threads are blocked so we must wake one
	futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
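
// Example (sketch): futex_mutex spins briefly, then parks the kernel thread in
// futex(2) under contention; never acquire it recursively.
//
//   futex_mutex m;
//   void update( int & shared ) {
//       lock( m );
//       shared += 1;							// critical section
//       unlock( m );
//   }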

//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

	// stores a lock state
	int val;
};
static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
	return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
}

static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
	return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE );
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
	int state, init_state;

	// speculative grab
	state = internal_exchange( this, 1 );
	if ( ! state ) return;							// state == 0
	init_state = state;
	for () {
		for ( 4 ) {
			while ( ! val ) {						// lock unlocked
				state = 0;
				if ( internal_try_lock( this, state, init_state ) ) return;
			}
			for ( 30 ) Pause();
		}

		while ( ! val ) {							// lock unlocked
			state = 0;
			if ( internal_try_lock( this, state, init_state ) ) return;
		}
		sched_yield();

		// if not in contended state, set to be in contended state
		state = internal_exchange( this, 2 );
		if ( ! state ) return;						// state == 0
		init_state = 2;
		futex( (int*)&val, FUTEX_WAIT, 2 );			// if val is not 2 this returns with EWOULDBLOCK
	}
}

static inline void unlock( go_mutex & this ) with( this ) {
	// if uncontended do atomic unlock and then return
	if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

	// otherwise threads are blocked so we must wake one
	futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void ^?{}( exp_backoff_then_block_lock & this ) {}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
	return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
	return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
	size_t compare_val = 0;
	int spin = 4;

	// exponential backoff
	for () {
		compare_val = 0;
		if ( internal_try_lock( this, compare_val ) ) return;
		if ( compare_val == 2 ) break;
		for ( spin ) Pause();
		if ( spin >= 1024 ) break;
		spin += spin;
	}

	if ( 2 != compare_val && try_lock_contention( this ) ) return;
	// block until signalled
	while ( block( this ) ) if ( try_lock_contention( this ) ) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
	if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &remove_first( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
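
// Example (sketch): spins with doubling backoff, then blocks; the interface is
// the usual lock()/try_lock()/unlock().
//
//   exp_backoff_then_block_lock m;
//   if ( try_lock( m ) ) {
//       // ... fast path ...
//       unlock( m );
//   } else {
//       lock( m );								// backoff, then block until signalled
//       // ... slow path ...
//       unlock( m );
//   }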

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with( this ) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &remove_first( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( select_node ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner thread when lock is held, 0p otherwise
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with( this ) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with( this ) {
	if ( owner == active_thread() ) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if ( owner != 0p ) {
		select_node node;
		insert_last( blocked_threads, node );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with( this ) {
	__handle_waituntil_OR( blocked_threads );
	select_node * node = &remove_first( blocked_threads );
	if ( node ) {
		owner = node->blocked_thread;
		recursion_count = 1;
		// if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
		wake_one( blocked_threads, *node );
	} else {
		owner = 0p;
		recursion_count = 0;
	}
}

static inline void unlock( simple_owner_lock & this ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if recursion count reaches zero, release lock and set new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		pop_node( this );
	}
	unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *(select_node *)t->link_node );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	pop_node( this );

	select_node node;
	active_thread()->link_node = (void *)&node;
	unlock( lock );

	pre_park_then_park( pp_fn, pp_datum );

	return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );

	// check if we can complete operation. If so race to establish winner in special OR case
	if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
		if ( ! __make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
			unlock( lock );
			return false;
		}
	}

	if ( owner == active_thread() ) {
		recursion_count++;
		if ( node.park_counter ) __make_select_node_available( node );
		unlock( lock );
		return true;
	}

	if ( owner != 0p ) {
		insert_last( blocked_threads, node );
		unlock( lock );
		return false;
	}

	owner = active_thread();
	recursion_count = 1;

	if ( node.park_counter ) __make_select_node_available( node );
	unlock( lock );
	return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( isListed( node ) ) {
		remove( node );
		unlock( lock );
		return false;
	}

	if ( owner == active_thread() ) {
		recursion_count--;
		if ( recursion_count == 0 ) {
			pop_node( this );
		}
	}
	unlock( lock );
	return false;
}

static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
__CFA_SELECT_GET_TYPE( simple_owner_lock );

//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with( this ) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with( this ) {
	mcs_spin_node node;
	lock( lock, node );
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
	unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with( this ) {
	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
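
// Example (sketch): spin_queue_lock queues acquirers through an internal MCS
// spin lock, so unlock is a single store; suited to short critical sections.
//
//   spin_queue_lock m;
//   lock( m );
//   // ... short critical section ...
//   unlock( m );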

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with( this ) {
	mcs_node node;
	lock( lock, node );
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
	unlock( lock, node );
}

static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
	__atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with( this ) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with( this ) {
	lock( lock );
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
	unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with( this ) {
	__atomic_store_n( &held, false, __ATOMIC_RELEASE );
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
	// first we acquire internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {									// if internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// if internal fast_block_lock is not held
	held = true;
	unlock( lock );

	unpark( t );
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
	// now we acquire the entire block_spin_lock upon waking up
	while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
	__atomic_store_n( &held, true, __ATOMIC_RELEASE );
	unlock( lock );									// now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// info_thread
// the info_thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall( L & | is_blocking_lock( L ) ) {
	struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall( L & | is_blocking_lock( L ) ) {

	//-----------------------------------------------------------------------------
	// cond_lock

	// The multi-tool condition variable
	// - can wait with a timeout, waking on either a signal or the timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct cond_lock {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread( L ) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void ?{}( cond_lock( L ) & this );
	void ^?{}( cond_lock( L ) & this );

	bool notify_one( cond_lock( L ) & this );
	bool notify_all( cond_lock( L ) & this );

	uintptr_t front( cond_lock( L ) & this );

	bool empty ( cond_lock( L ) & this );
	int counter( cond_lock( L ) & this );

	void wait( cond_lock( L ) & this );
	void wait( cond_lock( L ) & this, uintptr_t info );
	bool wait( cond_lock( L ) & this, Duration duration );
	bool wait( cond_lock( L ) & this, uintptr_t info, Duration duration );

	void wait( cond_lock( L ) & this, L & l );
	void wait( cond_lock( L ) & this, L & l, uintptr_t info );
	bool wait( cond_lock( L ) & this, L & l, Duration duration );
	bool wait( cond_lock( L ) & this, L & l, uintptr_t info, Duration duration );
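
	// Example (sketch): monitor-style waiting; wait( cv, m ) blocks the caller and
	// releases m, and m is reacquired before wait() returns. `ready` is a
	// hypothetical shared flag.
	//
	//   owner_lock m;
	//   cond_lock( owner_lock ) cv;
	//
	//   // waiter:
	//   lock( m );
	//   while ( ! ready ) wait( cv, m );
	//   unlock( m );
	//
	//   // signaller:
	//   lock( m );  ready = true;  unlock( m );
	//   notify_one( cv );						// legal outside the critical section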

	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding the branded lock is UNSAFE!
	// - only allows use of one lock; the cond var is branded to that lock after first use

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread( L ) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var( L ) & this );
	void ^?{}( fast_cond_var( L ) & this );

	bool notify_one( fast_cond_var( L ) & this );
	bool notify_all( fast_cond_var( L ) & this );

	uintptr_t front( fast_cond_var( L ) & this );
	bool empty ( fast_cond_var( L ) & this );

	void wait( fast_cond_var( L ) & this, L & l );
	void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread( L ) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var( L ) & this );
	void ^?{}( pthread_cond_var( L ) & this );

	bool notify_one( pthread_cond_var( L ) & this );
	bool notify_all( pthread_cond_var( L ) & this );

	uintptr_t front( pthread_cond_var( L ) & this );
	bool empty ( pthread_cond_var( L ) & this );

	void wait( pthread_cond_var( L ) & this, L & l );
	void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
	bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
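
	// Example (sketch): timed wait of the kind used to back pthread condition
	// variables; the returned bool distinguishes signal from timeout.
	// `deadline` is a hypothetical absolute timespec.
	//
	//   simple_owner_lock m;
	//   pthread_cond_var( simple_owner_lock ) cv;
	//
	//   lock( m );
	//   bool signalled = wait( cv, m, deadline );
	//   unlock( m );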
}