source: libcfa/src/concurrency/locks.hfa @ d60a4c2

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Dec 24 09:36:52 2024
// Update Count     : 16
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "collections/lockfree.hfa"
#include "collections/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

// futex headers
#include <linux/futex.h>      // Definition of FUTEX_* constants
#include <sys/syscall.h>      // Definition of SYS_* constants
#include <unistd.h>           // Definition of syscall routine

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & /*| sized( L )*/ )
trait is_blocking_lock {
    // For synchronization locks to use when acquiring
    void on_notify( L &, struct thread$ * );

    // For synchronization locks to use when releasing
    size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

    // To set the recursion count after getting signalled
    void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    pp_fn( pp_datum );
    park();
}

// macros for default routine impls for the is_blocking_lock trait that do not wait-morph

#define DEFAULT_ON_NOTIFY( lock_type ) \
    static inline void on_notify( lock_type & /*this*/, thread$ * t ) { unpark( t ); }

#define DEFAULT_ON_WAIT( lock_type ) \
    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
        unlock( this ); \
        pre_park_then_park( pp_fn, pp_datum ); \
        return 0; \
    }

// on_wakeup impl if lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }

// on_wakeup impl if lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
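
// Example (not part of the original header): a lock type (here a hypothetical
// my_lock) opts into these non-wait-morphing defaults by expanding all three
// macros, exactly as futex_mutex and friends do below:
//   DEFAULT_ON_NOTIFY( my_lock )
//   DEFAULT_ON_WAIT( my_lock )
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )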
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
    __spinlock_t lock;
    int count;
    __queue_t( thread$ ) waiting;
};

void ?{}( semaphore & this, int count = 1 );
void ^?{}( semaphore & this );
bool P( semaphore & this );
bool V( semaphore & this );
bool V( semaphore & this, unsigned count );
thread$ * V( semaphore & this, bool );

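// Usage sketch (not part of the original header): a counting semaphore
// guarding N identical resources; P() may block until a matching V() makes
// the count positive.
//   semaphore pool{ 4 };                         // four resources
//   void worker() {
//       P( pool );                               // acquire (may block)
//       // ... use one resource ...
//       V( pool );                               // release, may wake a waiter
//   }
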
//----------
struct single_acquisition_lock {
    inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( single_acquisition_lock );

//----------
struct owner_lock {
    inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
static inline void ^?{}( owner_lock & this ) {}
static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
__CFA_SELECT_GET_TYPE( owner_lock );

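// Usage note (not part of the original header): both wrappers delegate to
// blocking_lock; the differing constructor flags presumably make owner_lock
// track an owner and allow that owner to acquire recursively, which
// single_acquisition_lock forbids.
//   owner_lock m;
//   void nested() {
//       lock( m );
//       lock( m );           // same owner: recursion count becomes 2
//       unlock( m );
//       unlock( m );         // released when the count reaches 0
//   }
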
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
    mcs_node * volatile next;
    single_sem sem;
};

static inline void ?{}( mcs_node & this ) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
    return node->next;
}

struct mcs_lock {
    mcs_queue( mcs_node ) queue;
};

static inline void lock( mcs_lock & l, mcs_node & n ) {
    if ( push( l.queue, &n ) )
        wait( n.sem );
}

static inline void unlock( mcs_lock & l, mcs_node & n ) {
    mcs_node * next = advance( l.queue, &n );
    if ( next ) post( next->sem );
}

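// Usage sketch (not part of the original header): MCS threads its queue
// through caller-supplied nodes, so each acquisition passes its own node and
// the same node must be given to the matching unlock.
//   mcs_lock l;
//   void critical() {
//       mcs_node n;                      // per-acquisition queue node
//       lock( l, n );                    // enqueue; wait unless at the head
//       // ... critical section ...
//       unlock( l, n );                  // hand off to n's successor, if any
//   }
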
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
    mcs_spin_node * volatile next;
    volatile bool locked;
};

struct mcs_spin_queue {
    mcs_spin_node * volatile tail;
};

static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }

struct mcs_spin_lock {
    mcs_spin_queue queue;
};

static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
    n.locked = true;

    #if defined( __ARM_ARCH )
    __asm__ __volatile__ ( "DMB ISH" ::: );
    #endif

    mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
    if ( prev == 0p ) return;
    prev->next = &n;

    #if defined( __ARM_ARCH )
    __asm__ __volatile__ ( "DMB ISH" ::: );
    #endif

    while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();

    #if defined( __ARM_ARCH )
    __asm__ __volatile__ ( "DMB ISH" ::: );
    #endif
}

static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
    #if defined( __ARM_ARCH )
    __asm__ __volatile__ ( "DMB ISH" ::: );
    #endif

    mcs_spin_node * n_ptr = &n;
    if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
    while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();

    #if defined( __ARM_ARCH )
    __asm__ __volatile__ ( "DMB ISH" ::: );
    #endif

    n.next->locked = false;
}

//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - No reacquire on wakeup
struct futex_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

    // stores a lock state
    int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex( int * uaddr, int futex_op, int val ) {
    return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
}

static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
    return __atomic_compare_exchange_n( (int *)&val, (int *)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
}

static inline int internal_exchange( futex_mutex & this ) with( this ) {
    return __atomic_exchange_n( (int *)&val, 2, __ATOMIC_ACQUIRE );
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with( this ) {
    int state;

    for ( spin; 4 ~ 1024 ~ spin ) {
        state = 0;
        // if unlocked, lock and return
        if ( internal_try_lock( this, state ) ) return;
        if ( state == 2 ) break;
        for ( spin ) Pause();
    }

    // if not in contended state, set to be in contended state
    if ( state != 2 ) state = internal_exchange( this );

    // block and spin until we win the lock
    while ( state != 0 ) {
        futex( (int *)&val, FUTEX_WAIT, 2 );    // if val is not 2 this returns with EWOULDBLOCK
        state = internal_exchange( this );
    }
}

static inline void unlock( futex_mutex & this ) with( this ) {
    // if uncontended do atomic unlock and then return
    if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

    // otherwise threads are blocked so we must wake one
    futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

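// State sketch (not part of the original header), following the code above:
//   lock:   CAS 0 -> 1 is the uncontended fast path; a spinner that sees 2, or
//           exhausts its spin budget, exchanges val to 2 and FUTEX_WAITs on it.
//   unlock: exchange val to 0; a prior value of 1 means no waiters, so the
//           FUTEX_WAKE syscall is skipped; 2 means a thread may be parked, so
//           exactly one waiter is woken.
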
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

    // stores a lock state
    int val;
};
static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
    return __atomic_compare_exchange_n( (int *)&val, (int *)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
}

static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
    return __atomic_exchange_n( (int *)&val, swap, __ATOMIC_ACQUIRE );
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
    int state, init_state;

    // speculative grab
    state = internal_exchange( this, 1 );
    if ( ! state ) return;                      // state == 0
    init_state = state;
    for () {
        for ( 4 ) {
            while ( ! val ) {                   // lock unlocked
                state = 0;
                if ( internal_try_lock( this, state, init_state ) ) return;
            }
            for ( 30 ) Pause();
        }

        while ( ! val ) {                       // lock unlocked
            state = 0;
            if ( internal_try_lock( this, state, init_state ) ) return;
        }
        sched_yield();

        // if not in contended state, set to be in contended state
        state = internal_exchange( this, 2 );
        if ( ! state ) return;                  // state == 0
        init_state = 2;
        futex( (int *)&val, FUTEX_WAIT, 2 );    // if val is not 2 this returns with EWOULDBLOCK
    }
}

static inline void unlock( go_mutex & this ) with( this ) {
    // if uncontended do atomic unlock and then return
    if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;

    // otherwise threads are blocked so we must wake one
    futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
    // Spin lock used for mutual exclusion
    __spinlock_t spinlock;

    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Used for comparing and exchanging
    volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
    this.spinlock{};
    this.blocked_threads{};
    this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void ^?{}( exp_backoff_then_block_lock & this ) {}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
    return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
    return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park();
    return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
    size_t compare_val = 0;
    int spin = 4;

    // exponential backoff
    for () {
        compare_val = 0;
        if ( internal_try_lock( this, compare_val ) ) return;
        if ( compare_val == 2 ) break;
        for ( spin ) Pause();
        if ( spin >= 1024 ) break;
        spin += spin;
    }

    if ( 2 != compare_val && try_lock_contention( this ) ) return;
    // block until signalled
    while ( block( this ) ) if ( try_lock_contention( this ) ) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
    if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

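// Backoff sketch (not part of the original header): the spin budget doubles
// each round (4, 8, ..., 1024). lock_value encodes 0 = unlocked, 1 = held and
// uncontended, 2 = held with possible blocked waiters; unlock only takes the
// spinlock and unparks a thread when it observes 2.
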
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // flag showing if lock is held
    bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with( this ) {
    lock{};
    blocked_threads{};
    held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( held ) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park();
        return;
    }
    held = true;
    unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
    thread$ * t = &try_pop_front( blocked_threads );
    held = ( t ? true : false );
    unpark( t );
    unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    insert_last( blocked_threads, *t );
    unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
    // List of blocked threads
    dlist( select_node ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // owner showing if lock is held
    struct thread$ * owner;

    size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with( this ) {
    lock{};
    blocked_threads{};
    owner = 0p;
    recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with( this ) {
    if ( owner == active_thread() ) {
        recursion_count++;
        return;
    }
    lock( lock __cfaabi_dbg_ctx2 );

    if ( owner != 0p ) {
        select_node node;
        insert_last( blocked_threads, node );
        unlock( lock );
        park();
        return;
    }
    owner = active_thread();
    recursion_count = 1;
    unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with( this ) {
    __handle_waituntil_OR( blocked_threads );
    select_node * node = &try_pop_front( blocked_threads );
    if ( node ) {
        owner = node->blocked_thread;
        recursion_count = 1;
        // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
        wake_one( blocked_threads, *node );
    } else {
        owner = 0p;
        recursion_count = 0;
    }
}

static inline void unlock( simple_owner_lock & this ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
    // if the recursion count drops to zero, release the lock and set a new owner if one is waiting
    recursion_count--;
    if ( recursion_count == 0 ) {
        pop_node( this );
    }
    unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    // lock held
    if ( owner != 0p ) {
        insert_last( blocked_threads, *(select_node *)t->link_node );
    }
    // lock not held
    else {
        owner = t;
        recursion_count = 1;
        unpark( t );
    }
    unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

    size_t ret = recursion_count;

    pop_node( this );

    select_node node;
    active_thread()->link_node = (void *)&node;
    unlock( lock );

    pre_park_then_park( pp_fn, pp_datum );

    return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }

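// Hand-off note (not part of the original header): on_notify wait-morphs — if
// the lock is free, the signalled thread becomes owner and is unparked at
// once; otherwise its select_node is queued so pop_node() transfers ownership
// at the next unlock without an intervening wakeup.
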
// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );

    // check if we can complete the operation; if so, race to establish the winner in the special OR case
    if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
        if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
            unlock( lock );
            return false;
        }
    }

    if ( owner == active_thread() ) {
        recursion_count++;
        if ( node.park_counter ) __make_select_node_available( node );
        unlock( lock );
        return true;
    }

    if ( owner != 0p ) {
        insert_last( blocked_threads, node );
        unlock( lock );
        return false;
    }

    owner = active_thread();
    recursion_count = 1;

    if ( node.park_counter ) __make_select_node_available( node );
    unlock( lock );
    return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( node`isListed ) {
        remove( node );
        unlock( lock );
        return false;
    }

    if ( owner == active_thread() ) {
        recursion_count--;
        if ( recursion_count == 0 ) {
            pop_node( this );
        }
    }
    unlock( lock );
    return false;
}

static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
__CFA_SELECT_GET_TYPE( simple_owner_lock );

//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking or atomic read-modify-write in unlock
struct spin_queue_lock {
    // Spin lock used for mutual exclusion
    mcs_spin_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with( this ) {
    lock{};
    held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with( this ) {
    mcs_spin_node node;
    lock( lock, node );
    while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
    __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
    unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with( this ) {
    __atomic_store_n( &held, false, __ATOMIC_RELEASE );
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads that are not first block instead of spinning)
struct mcs_block_spin_lock {
    // Spin lock used for mutual exclusion
    mcs_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
    lock{};
    held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with( this ) {
    mcs_node node;
    lock( lock, node );
    while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
    __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
    unlock( lock, node );
}

static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
    __atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads that are not first block instead of spinning)
struct block_spin_lock {
    // Spin lock used for mutual exclusion
    fast_block_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with( this ) {
    lock{};
    held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with( this ) {
    lock( lock );
    while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
    __atomic_store_n( &held, true, __ATOMIC_RELEASE );
    unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with( this ) {
    __atomic_store_n( &held, false, __ATOMIC_RELEASE );
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
    // first acquire the internal fast_block_lock
    lock( lock __cfaabi_dbg_ctx2 );
    if ( held ) { // if the internal fast_block_lock is held
        insert_last( blocked_threads, *t );
        unlock( lock );
        return;
    }
    // the internal fast_block_lock is not held
    held = true;
    unlock( lock );

    unpark( t );
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
    // now acquire the entire block_spin_lock upon waking up
    while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
    __atomic_store_n( &held, true, __ATOMIC_RELEASE );
    unlock( lock ); // now release the internal fast_block_lock
}

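// Note (not part of the original header): spin_queue_lock, mcs_block_spin_lock,
// and block_spin_lock share a two-level design — an internal lock orders
// threads while a separate held flag is spun on — and differ only in the
// internal lock used: mcs_spin_lock (pure spinning), mcs_lock (queued threads
// block), or fast_block_lock (blocking).
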
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall( L & | is_blocking_lock( L ) ) {
    struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall( L & | is_blocking_lock( L ) ) {

    //-----------------------------------------------------------------------------
    // condition_variable

    // The multi-tool condition variable
    // - waits can take a timeout and return on either a signal or the timeout
    // - can wait without passing a lock
    // - can have waiters reacquire different locks while waiting on the same cond var
    // - has shadow queue
    // - can be signalled outside of critical sections with no locks held
    struct condition_variable {
        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // List of blocked threads
        dlist( info_thread( L ) ) blocked_threads;

        // Count of current blocked threads
        int count;
    };

    void ?{}( condition_variable( L ) & this );
    void ^?{}( condition_variable( L ) & this );

    bool notify_one( condition_variable( L ) & this );
    bool notify_all( condition_variable( L ) & this );

    uintptr_t front( condition_variable( L ) & this );

    bool empty ( condition_variable( L ) & this );
    int counter( condition_variable( L ) & this );

    void wait( condition_variable( L ) & this );
    void wait( condition_variable( L ) & this, uintptr_t info );
    bool wait( condition_variable( L ) & this, Duration duration );
    bool wait( condition_variable( L ) & this, uintptr_t info, Duration duration );

    void wait( condition_variable( L ) & this, L & l );
    void wait( condition_variable( L ) & this, L & l, uintptr_t info );
    bool wait( condition_variable( L ) & this, L & l, Duration duration );
    bool wait( condition_variable( L ) & this, L & l, uintptr_t info, Duration duration );

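    // Usage sketch (not part of the original header): a classic guarded wait,
    // instantiating L as owner_lock; wait( cond, l ) releases l, blocks, and
    // l is held again (reacquired per the lock's on_wakeup) when wait returns.
    //   owner_lock l;
    //   condition_variable( owner_lock ) cond;
    //   void consumer() {
    //       lock( l );
    //       while ( /* not ready */ ) wait( cond, l );
    //       // ... proceed while holding l ...
    //       unlock( l );
    //   }
    //   void producer() {
    //       lock( l );
    //       // ... make ready ...
    //       notify_one( cond );
    //       unlock( l );
    //   }
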
    //-----------------------------------------------------------------------------
    // fast_cond_var

    // The trimmed and slim condition variable
    // - no internal lock, so you must hold a lock while using this cond var
    // - signalling without holding the branded lock is UNSAFE!
    // - only allows usage of one lock; the cond var is branded after first use

    struct fast_cond_var {
        // List of blocked threads
        dlist( info_thread( L ) ) blocked_threads;
        #ifdef __CFA_DEBUG__
        L * lock_used;
        #endif
    };

    void ?{}( fast_cond_var( L ) & this );
    void ^?{}( fast_cond_var( L ) & this );

    bool notify_one( fast_cond_var( L ) & this );
    bool notify_all( fast_cond_var( L ) & this );

    uintptr_t front( fast_cond_var( L ) & this );
    bool empty ( fast_cond_var( L ) & this );

    void wait( fast_cond_var( L ) & this, L & l );
    void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );

    //-----------------------------------------------------------------------------
    // pthread_cond_var
    //
    // - cond var with minimal footprint
    // - supports the operations needed for pthread cond

    struct pthread_cond_var {
        dlist( info_thread( L ) ) blocked_threads;
        __spinlock_t lock;
    };

    void ?{}( pthread_cond_var( L ) & this );
    void ^?{}( pthread_cond_var( L ) & this );

    bool notify_one( pthread_cond_var( L ) & this );
    bool notify_all( pthread_cond_var( L ) & this );

    uintptr_t front( pthread_cond_var( L ) & this );
    bool empty ( pthread_cond_var( L ) & this );

    void wait( pthread_cond_var( L ) & this, L & l );
    void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
    bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
    bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
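
    // Usage sketch (not part of the original header): a timed wait; by analogy
    // with the Duration overloads of condition_variable above, the timespec
    // overloads presumably return false on timeout and true when signalled.
    //   owner_lock l;
    //   pthread_cond_var( owner_lock ) cond;
    //   bool wait_until( timespec deadline ) {
    //       lock( l );
    //       bool signalled = wait( cond, l, deadline );
    //       unlock( l );
    //       return signalled;
    //   }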
}