source: libcfa/src/concurrency/locks.hfa@ b94579a

Last change on this file since b94579a was b94579a, checked in by Peter A. Buhr <pabuhr@…>, 4 days ago

update semaphore lock

1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks used with the runtime thread system.
9//
10// Author : Colby Alexander Parsons
11// Created On : Thu Jan 21 19:46:50 2021
12// Last Modified By : Peter A. Buhr
13// Last Modified On : Wed Nov 5 10:27:45 2025
14// Update Count : 61
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "collections/lockfree.hfa"
24#include "collections/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include "select.hfa"
33
34// futex headers
35#include <linux/futex.h> // Definition of FUTEX_* constants
36#include <sys/syscall.h> // Definition of SYS_* constants
37#include <unistd.h> // Definition of syscall routine
38
39typedef void (*__cfa_pre_park)( void * );
40
41static inline void pre_park_noop( void * ) {}
42
43//-----------------------------------------------------------------------------
44// is_blocking_lock
45forall( L & /*| sized( L )*/ )
46trait is_blocking_lock {
47 // For synchronization locks to use when acquiring
48 void on_notify( L &, struct thread$ * );
49
50 // For synchronization locks to use when releasing
51 size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
52
53 // To set the recursion count after being signalled
54 void on_wakeup( L &, size_t recursion );
55};
56
57static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
58 pp_fn( pp_datum );
59 park();
60}
61
62// macros for default routine impls for is_blocking_lock trait that do not wait-morph
63
64#define DEFAULT_ON_NOTIFY( lock_type ) \
65 static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
66
67#define DEFAULT_ON_WAIT( lock_type ) \
68 static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
69 unlock( this ); \
70 pre_park_then_park( pp_fn, pp_datum ); \
71 return 0; \
72 }
73
74// on_wakeup impl if lock should be reacquired after waking up
75#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
76 static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
77
78// on_wakeup impl if lock will not be reacquired after waking up
79#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
80 static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
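
// Example (sketch): a hypothetical lock type "my_lock", with lock()/unlock() defined elsewhere,
// can satisfy the is_blocking_lock trait without wait-morphing by instantiating the defaults:
//   DEFAULT_ON_NOTIFY( my_lock )       // on_notify: unpark the signalled thread directly
//   DEFAULT_ON_WAIT( my_lock )         // on_wait: unlock, run the pre-park hook, then park
//   DEFAULT_ON_WAKEUP_REACQ( my_lock ) // on_wakeup: reacquire the lock after waking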
81
82
83//-----------------------------------------------------------------------------
84// Semaphore, counting
85
86struct semaphore {
87 // PRIVATE
88 ssize_t count$; // - => # waiting threads, 0 => block, + => acquire
89 __spinlock_t lock$; // protect blocking-lock critical sections
90 __queue_t( thread$ ) waiting$; // waiting threads
91};
92
93void ?{}( semaphore & sem, ssize_t count = 1 );
94// Return values are used for composition. Calling threads know if a thread is unblocked, which can be useful for
95// debugging, performance instrumentation and other metadata tracking purposes.
96bool P( semaphore & sem );
97static inline bool P( semaphore & sem, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem ); }
98bool P( semaphore & sem, semaphore & lock ); // atomic block and release
99bool try_P( semaphore & sem );
100static inline bool P( semaphore & sem, semaphore & lock, uintptr_t shadow ) { active_thread()->shadow$ = shadow; return P( sem, lock ); }
101thread$ * V( semaphore & sem, const bool doUnpark = true );
102static inline bool V( semaphore & sem ) with( sem ) { return V( sem, true ) != 0p; }
103bool V( semaphore & sem, size_t count );
104static inline uintptr_t front( semaphore & sem ) with( sem ) { // return shadow information for first waiting thread
105 #ifdef __CFA_DEBUG__
106 if ( ! waiting$ ) { // waiting queue must not be empty
107 abort( "Attempt to access user data on an empty semaphore lock.\n"
108 "Possible cause is not checking if the condition lock is empty before reading stored data." );
109 } // if
110 #endif // __CFA_DEBUG__
111 return waiting$.head->shadow$; // return condition information stored with blocked task
112}
113static inline ssize_t counter( semaphore & sem ) with( sem ) { return count$; } // - => # waiting threads, 0 => block, + => acquire
114//static inline int ?!=?( semaphore & sem, zero_t ) with( sem ) { return count$ != 0; } // empty waiting queue
115static inline bool empty( semaphore & sem ) with( sem ) { return count$ == 0; } // empty waiting queue
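
// Example usage (sketch): a counting semaphore limiting concurrent access; "worker" is a
// hypothetical CFA thread type defined elsewhere.
//   semaphore permits{ 3 };      // at most 3 workers in the critical region
//   void main( worker & ) {
//       P( permits );            // acquire a permit, blocking when none are available
//       /* ... bounded work ... */
//       V( permits );            // release the permit, possibly unparking a waiter
//   }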
116
117
118//----------
119struct single_acquisition_lock {
120 inline blocking_lock;
121};
122
123static inline void ?{}( single_acquisition_lock & this ) { ((blocking_lock &)this){ false, false }; }
124static inline void ^?{}( single_acquisition_lock & this ) {}
125static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
126static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
127static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
128static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
129static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
130static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
131static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
132static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
133static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
134__CFA_SELECT_GET_TYPE( single_acquisition_lock );
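
// Example usage (sketch): simple mutual exclusion; acquiring again from the same thread
// before unlocking is not permitted.
//   single_acquisition_lock m;
//   lock( m );
//   /* ... critical section ... */
//   unlock( m );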
135
136//----------
137struct owner_lock {
138 inline blocking_lock;
139};
140
141static inline void ?{}( owner_lock & this ) { ((blocking_lock &)this){ true, true }; }
142static inline void ^?{}( owner_lock & this ) {}
143static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
144static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
145static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
146static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
147static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
148static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
149static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
150static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
151static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
152__CFA_SELECT_GET_TYPE( owner_lock );
153
154//-----------------------------------------------------------------------------
155// MCS Lock
156struct mcs_node {
157 mcs_node * volatile next;
158 single_sem sem;
159};
160
161static inline void ?{}( mcs_node & this ) { this.next = 0p; }
162
163static inline mcs_node * volatile & next( mcs_node * node ) {
164 return node->next;
165}
166
167struct mcs_lock {
168 mcs_queue( mcs_node ) queue;
169};
170
171static inline void lock( mcs_lock & l, mcs_node & n ) {
172 if ( push( l.queue, &n ) )
173 wait( n.sem );
174}
175
176static inline void unlock( mcs_lock & l, mcs_node & n ) {
177 mcs_node * nxt = advance( l.queue, &n );
178 if ( nxt ) post( nxt->sem );
179}
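
// Example usage (sketch): each acquisition supplies its own queue node, which must remain
// allocated (typically on the stack) until the matching unlock.
//   mcs_lock l;
//   void critical_region() {          // hypothetical helper
//       mcs_node n;
//       lock( l, n );
//       /* ... critical section ... */
//       unlock( l, n );
//   }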
180
181//-----------------------------------------------------------------------------
182// MCS Spin Lock
183// - No recursive acquisition
184// - Needs to be released by owner
185
186struct mcs_spin_node {
187 mcs_spin_node * volatile next;
188 volatile bool locked;
189};
190
191struct mcs_spin_queue {
192 mcs_spin_node * volatile tail;
193};
194
195static inline void ?{}( mcs_spin_node & this ) { this.next = 0p; this.locked = true; }
196
197struct mcs_spin_lock {
198 mcs_spin_queue queue;
199};
200
201static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
202 n.locked = true;
203
204 #if defined( __ARM_ARCH )
205 __asm__ __volatile__ ( "DMB ISH" ::: );
206 #endif
207
208 mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
209 if ( prev_val == 0p ) return;
210 prev_val->next = &n;
211
212 #if defined( __ARM_ARCH )
213 __asm__ __volatile__ ( "DMB ISH" ::: );
214 #endif
215
216 while ( __atomic_load_n( &n.locked, __ATOMIC_RELAXED ) ) Pause();
217
218 #if defined( __ARM_ARCH )
219 __asm__ __volatile__ ( "DMB ISH" ::: );
220 #endif
221}
222
223static inline void unlock( mcs_spin_lock & l, mcs_spin_node & n ) {
224 #if defined( __ARM_ARCH )
225 __asm__ __volatile__ ( "DMB ISH" ::: );
226 #endif
227
228 mcs_spin_node * n_ptr = &n;
229 if ( __atomic_compare_exchange_n( &l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
230 while ( __atomic_load_n( &n.next, __ATOMIC_RELAXED ) == 0p ) Pause();
231
232 #if defined( __ARM_ARCH )
233 __asm__ __volatile__ ( "DMB ISH" ::: );
234 #endif
235
236 n.next->locked = false;
237}
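
// Example usage (sketch): same caller-supplied node pattern as mcs_lock, but waiters spin on
// their own node's "locked" flag instead of blocking on a semaphore.
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   /* ... short critical section ... */
//   unlock( l, n );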
238
239//-----------------------------------------------------------------------------
240// futex_mutex
241
242// - Kernel thd blocking alternative to the spinlock
243// - No ownership (will deadlock on reacq)
244// - no reacq on wakeup
245struct futex_mutex {
246 // lock state: any state other than UNLOCKED is locked
247 // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
248
249 // stores a lock state
250 int val;
251};
252
253// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
254static inline int futex( int *uaddr, int futex_op, int val ) {
255 return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
256}
257
258static inline void ?{}( futex_mutex & this ) with( this ) { val = 0; }
259
260static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with( this ) {
261 return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
262}
263
264static inline int internal_exchange( futex_mutex & this ) with( this ) {
265 return __atomic_exchange_n(( int*)&val, 2, __ATOMIC_ACQUIRE );
266}
267
268// if this is called recursively IT WILL DEADLOCK!!!!!
269static inline void lock( futex_mutex & this ) with( this ) {
270 int state;
271
272 for ( spin; 4 ~ 1024 ~ spin ) {
273 state = 0;
274 // if unlocked, lock and return
275 if ( internal_try_lock( this, state ) ) return;
276 if ( state == 2 ) break;
277 for ( spin ) Pause();
278 }
279
280 // if not in contended state, set to be in contended state
281 if ( state != 2 ) state = internal_exchange( this );
282
283 // block and spin until we win the lock
284 while ( state != 0 ) {
285 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
286 state = internal_exchange( this );
287 }
288}
289
290static inline void unlock( futex_mutex & this ) with( this ) {
291 // if uncontended do atomic unlock and then return
292 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
293
294 // otherwise threads are blocked so we must wake one
295 futex(( int *)&val, FUTEX_WAKE, 1 );
296}
297
298DEFAULT_ON_NOTIFY( futex_mutex )
299DEFAULT_ON_WAIT( futex_mutex )
300DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
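
// Example usage (sketch): blocks the underlying kernel thread when contended, so it is suited
// to short critical sections; recursive acquisition by the same thread deadlocks.
//   futex_mutex m;
//   lock( m );
//   /* ... critical section ... */
//   unlock( m );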
301
302//-----------------------------------------------------------------------------
303// go_mutex
304
305// - Kernel thd blocking alternative to the spinlock
306// - No ownership (will deadlock on reacq)
307// - Golang's flavour of mutex
308// - Impl taken from Golang: src/runtime/lock_futex.go
309struct go_mutex {
310 // lock state: any state other than UNLOCKED is locked
311 // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
312
313 // stores a lock state
314 int val;
315};
316static inline void ?{}( go_mutex & this ) with( this ) { val = 0; }
317static inline void ?{}( go_mutex & this, go_mutex this2 ) = void;
318static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;
319
320static inline bool internal_try_lock( go_mutex & this, int & compare_val, int new_val ) with( this ) {
321 return __atomic_compare_exchange_n( (int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE );
322}
323
324static inline int internal_exchange( go_mutex & this, int swap ) with( this ) {
325 return __atomic_exchange_n( (int*)&val, swap, __ATOMIC_ACQUIRE );
326}
327
328// if this is called recursively IT WILL DEADLOCK!!!!!
329static inline void lock( go_mutex & this ) with( this ) {
330 int state, init_state;
331
332 // speculative grab
333 state = internal_exchange( this, 1 );
334 if ( ! state ) return; // state == 0
335 init_state = state;
336 for () {
337 for ( 4 ) {
338 while ( ! val ) { // lock unlocked
339 state = 0;
340 if ( internal_try_lock( this, state, init_state ) ) return;
341 }
342 for ( 30 ) Pause();
343 }
344
345 while ( ! val ) { // lock unlocked
346 state = 0;
347 if ( internal_try_lock( this, state, init_state ) ) return;
348 }
349 sched_yield();
350
351 // if not in contended state, set to be in contended state
352 state = internal_exchange( this, 2 );
353 if ( ! state ) return; // state == 0
354 init_state = 2;
355 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
356 }
357}
358
359static inline void unlock( go_mutex & this ) with( this ) {
360 // if uncontended do atomic unlock and then return
361 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
362
363 // otherwise threads are blocked so we must wake one
364 futex( (int *)&val, FUTEX_WAKE, 1 );
365}
366
367DEFAULT_ON_NOTIFY( go_mutex )
368DEFAULT_ON_WAIT( go_mutex )
369DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )
370
371//-----------------------------------------------------------------------------
372// Exponential backoff then block lock
373struct exp_backoff_then_block_lock {
374 // Spin lock used for mutual exclusion
375 __spinlock_t spinlock;
376
377 // List of blocked threads
378 dlist( thread$ ) blocked_threads;
379
380 // Used for comparing and exchanging
381 volatile size_t lock_value;
382};
383
384static inline void ?{}( exp_backoff_then_block_lock & this ) {
385 this.spinlock{};
386 this.blocked_threads{};
387 this.lock_value = 0;
388}
389static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
390static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
391
392static inline void ^?{}( exp_backoff_then_block_lock & this ){}
393
394static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with( this ) {
395 return __atomic_compare_exchange_n( &lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED );
396}
397
398static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
399
400static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with( this ) {
401 return ! __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
402}
403
404static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
405 lock( spinlock __cfaabi_dbg_ctx2 );
406 if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
407 unlock( spinlock );
408 return true;
409 }
410 insert_last( blocked_threads, *active_thread() );
411 unlock( spinlock );
412 park( );
413 return true;
414}
415
416static inline void lock( exp_backoff_then_block_lock & this ) with( this ) {
417 size_t compare_val = 0;
418 int spin = 4;
419
420 // exponential backoff: spin count doubles each iteration, capped at 1024
421 for () {
422 compare_val = 0;
423 if ( internal_try_lock( this, compare_val ) ) return;
424 if ( compare_val == 2 ) break;
425 for ( spin ) Pause();
426 if ( spin >= 1024 ) break;
427 spin += spin;
428 }
429
430 if ( 2 != compare_val && try_lock_contention( this ) ) return;
431 // block until signalled
432 while ( block( this ) ) if ( try_lock_contention( this ) ) return;
433}
434
435static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
436 if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
437 lock( spinlock __cfaabi_dbg_ctx2 );
438 thread$ * t = &remove_first( blocked_threads );
439 unlock( spinlock );
440 unpark( t );
441}
442
443DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
444DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
445DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )
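
// Example usage (sketch): same lock/unlock interface; an uncontended acquire is a single
// compare-and-swap, while contended acquires spin with doubling pauses before blocking.
//   exp_backoff_then_block_lock m;
//   lock( m );
//   /* ... critical section ... */
//   unlock( m );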
446
447//-----------------------------------------------------------------------------
448// Fast Block Lock
449
450// minimal blocking lock
451// - No reacquire for cond var
452// - No recursive acquisition
453// - No ownership
454struct fast_block_lock {
455 // List of blocked threads
456 dlist( thread$ ) blocked_threads;
457
458 // Spin lock used for mutual exclusion
459 __spinlock_t lock;
460
461 // flag showing if lock is held
462 bool held:1;
463};
464
465static inline void ?{}( fast_block_lock & this ) with( this ) {
466 lock{};
467 blocked_threads{};
468 held = false;
469}
470static inline void ^?{}( fast_block_lock & this ) {}
471static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
472static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
473
474// if this is called recursively IT WILL DEADLOCK!!!!!
475static inline void lock( fast_block_lock & this ) with( this ) {
476 lock( lock __cfaabi_dbg_ctx2 );
477 if ( held ) {
478 insert_last( blocked_threads, *active_thread() );
479 unlock( lock );
480 park( );
481 return;
482 }
483 held = true;
484 unlock( lock );
485}
486
487static inline void unlock( fast_block_lock & this ) with( this ) {
488 lock( lock __cfaabi_dbg_ctx2 );
489 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
490 thread$ * t = &remove_first( blocked_threads );
491 held = ( t ? true : false );
492 unpark( t );
493 unlock( lock );
494}
495
496static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
497 lock( lock __cfaabi_dbg_ctx2 );
498 insert_last( blocked_threads, *t );
499 unlock( lock );
500}
501DEFAULT_ON_WAIT( fast_block_lock )
502DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )
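
// Example usage (sketch): minimal blocking mutual exclusion with no ownership tracking, so the
// unlocking thread need not be the locking thread; recursive acquisition deadlocks.
//   fast_block_lock m;
//   lock( m );
//   /* ... critical section ... */
//   unlock( m );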
503
504//-----------------------------------------------------------------------------
505// simple_owner_lock
506
507// pthread owner lock
508// - reacquire for cond var
509// - recursive acquisition
510// - ownership
511struct simple_owner_lock {
512 // List of blocked threads
513 dlist( select_node ) blocked_threads;
514
515 // Spin lock used for mutual exclusion
516 __spinlock_t lock;
517
518 // owner showing if lock is held
519 struct thread$ * owner;
520
521 size_t recursion_count;
522};
523
524static inline void ?{}( simple_owner_lock & this ) with( this ) {
525 lock{};
526 blocked_threads{};
527 owner = 0p;
528 recursion_count = 0;
529}
530static inline void ^?{}( simple_owner_lock & this ) {}
531static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
532static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
533
534static inline void lock( simple_owner_lock & this ) with( this ) {
535 if ( owner == active_thread() ) {
536 recursion_count++;
537 return;
538 }
539 lock( lock __cfaabi_dbg_ctx2 );
540
541 if ( owner != 0p ) {
542 select_node node;
543 insert_last( blocked_threads, node );
544 unlock( lock );
545 park( );
546 return;
547 }
548 owner = active_thread();
549 recursion_count = 1;
550 unlock( lock );
551}
552
553static inline void pop_node( simple_owner_lock & this ) with( this ) {
554 __handle_waituntil_OR( blocked_threads );
555 select_node * node = &remove_first( blocked_threads );
556 if ( node ) {
557 owner = node->blocked_thread;
558 recursion_count = 1;
559 // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
560 wake_one( blocked_threads, *node );
561 } else {
562 owner = 0p;
563 recursion_count = 0;
564 }
565}
566
567static inline void unlock( simple_owner_lock & this ) with( this ) {
568 lock( lock __cfaabi_dbg_ctx2 );
569 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
570 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
571 // if recursion count is zero release lock and set new owner if one is waiting
572 recursion_count--;
573 if ( recursion_count == 0 ) {
574 pop_node( this );
575 }
576 unlock( lock );
577}
578
579static inline void on_notify( simple_owner_lock & this, thread$ * t ) with( this ) {
580 lock( lock __cfaabi_dbg_ctx2 );
581 // lock held
582 if ( owner != 0p ) {
583 insert_last( blocked_threads, *(select_node *)t->link_node );
584 }
585 // lock not held
586 else {
587 owner = t;
588 recursion_count = 1;
589 unpark( t );
590 }
591 unlock( lock );
592}
593
594static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with( this ) {
595 lock( lock __cfaabi_dbg_ctx2 );
596 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
597 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
598
599 size_t ret = recursion_count;
600
601 pop_node( this );
602
603 select_node node;
604 active_thread()->link_node = (void *)&node;
605 unlock( lock );
606
607 pre_park_then_park( pp_fn, pp_datum );
608
609 return ret;
610}
611
612static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with( this ) { recursion_count = recursion; }
613
614// waituntil() support
615static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
616 lock( lock __cfaabi_dbg_ctx2 );
617
618 // check if we can complete operation. If so race to establish winner in special OR case
619 if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
620 if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
621 unlock( lock );
622 return false;
623 }
624 }
625
626 if ( owner == active_thread() ) {
627 recursion_count++;
628 if ( node.park_counter ) __make_select_node_available( node );
629 unlock( lock );
630 return true;
631 }
632
633 if ( owner != 0p ) {
634 insert_last( blocked_threads, node );
635 unlock( lock );
636 return false;
637 }
638
639 owner = active_thread();
640 recursion_count = 1;
641
642 if ( node.park_counter ) __make_select_node_available( node );
643 unlock( lock );
644 return true;
645}
646
647static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
648 lock( lock __cfaabi_dbg_ctx2 );
649 if ( isListed( node ) ) {
650 remove( node );
651 unlock( lock );
652 return false;
653 }
654
655 if ( owner == active_thread() ) {
656 recursion_count--;
657 if ( recursion_count == 0 ) {
658 pop_node( this );
659 }
660 }
661 unlock( lock );
662 return false;
663}
664
665static inline bool on_selected( simple_owner_lock & /*this*/, select_node & /*node*/ ) { return true; }
666__CFA_SELECT_GET_TYPE( simple_owner_lock );
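
// Example usage (sketch): owner-based locking with recursive acquisition; each lock() by the
// owner must be matched by an unlock() from the same thread.
//   simple_owner_lock m;
//   lock( m );
//   lock( m );      // nested acquisition by the owner does not block
//   unlock( m );
//   unlock( m );    // lock is released and handed to a waiter, if any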
667
668//-----------------------------------------------------------------------------
669// Spin Queue Lock
670
671// - No reacquire for cond var
672// - No recursive acquisition
673// - No ownership
674// - spin lock with no locking or atomic read-modify-write in unlock (plain release store)
675struct spin_queue_lock {
676 // Spin lock used for mutual exclusion
677 mcs_spin_lock lock;
678
679 // flag showing if lock is held
680 volatile bool held;
681};
682
683static inline void ?{}( spin_queue_lock & this ) with( this ) {
684 lock{};
685 held = false;
686}
687static inline void ^?{}( spin_queue_lock & this ) {}
688static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
689static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
690
691// if this is called recursively IT WILL DEADLOCK!
692static inline void lock( spin_queue_lock & this ) with( this ) {
693 mcs_spin_node node;
694 lock( lock, node );
695 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
696 __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
697 unlock( lock, node );
698}
699
700static inline void unlock( spin_queue_lock & this ) with( this ) {
701 __atomic_store_n( &held, false, __ATOMIC_RELEASE );
702}
703
704DEFAULT_ON_NOTIFY( spin_queue_lock )
705DEFAULT_ON_WAIT( spin_queue_lock )
706DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )
707
708//-----------------------------------------------------------------------------
709// MCS Block Spin Lock
710
711// - No reacquire for cond var
712// - No recursive acquisition
713// - No ownership
714// - Blocks, but the thread at the head of the queue spins (like spin_queue_lock, except threads that are not first block instead of spinning)
715struct mcs_block_spin_lock {
716 // Spin lock used for mutual exclusion
717 mcs_lock lock;
718
719 // flag showing if lock is held
720 volatile bool held;
721};
722
723static inline void ?{}( mcs_block_spin_lock & this ) with( this ) {
724 lock{};
725 held = false;
726}
727static inline void ^?{}( mcs_block_spin_lock & this ) {}
728static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
729static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
730
731// if this is called recursively IT WILL DEADLOCK!!!!!
732static inline void lock( mcs_block_spin_lock & this ) with( this ) {
733 mcs_node node;
734 lock( lock, node );
735 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
736 __atomic_store_n( &held, true, __ATOMIC_SEQ_CST );
737 unlock( lock, node );
738}
739
740static inline void unlock( mcs_block_spin_lock & this ) with( this ) {
741 __atomic_store_n( &held, false, __ATOMIC_SEQ_CST );
742}
743
744DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
745DEFAULT_ON_WAIT( mcs_block_spin_lock )
746DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )
747
748//-----------------------------------------------------------------------------
749// Block Spin Lock
750
751// - No reacquire for cond var
752// - No recursive acquisition
753// - No ownership
754// - Blocks, but the thread holding the internal lock spins on the held flag (other threads block on the internal fast_block_lock)
755struct block_spin_lock {
756 // Spin lock used for mutual exclusion
757 fast_block_lock lock;
758
759 // flag showing if lock is held
760 volatile bool held;
761};
762
763static inline void ?{}( block_spin_lock & this ) with( this ) {
764 lock{};
765 held = false;
766}
767static inline void ^?{}( block_spin_lock & this ) {}
768static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
769static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
770
771// if this is called recursively IT WILL DEADLOCK!!!!!
772static inline void lock( block_spin_lock & this ) with( this ) {
773 lock( lock );
774 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
775 __atomic_store_n( &held, true, __ATOMIC_RELEASE );
776 unlock( lock );
777}
778
779static inline void unlock( block_spin_lock & this ) with( this ) {
780 __atomic_store_n( &held, false, __ATOMIC_RELEASE );
781}
782
783static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with( this.lock ) {
784 // first we acquire internal fast_block_lock
785 lock( lock __cfaabi_dbg_ctx2 );
786 if ( held ) { // if internal fast_block_lock is held
787 insert_last( blocked_threads, *t );
788 unlock( lock );
789 return;
790 }
791 // if internal fast_block_lock is not held
792 held = true;
793 unlock( lock );
794
795 unpark( t );
796}
797DEFAULT_ON_WAIT( block_spin_lock )
798static inline void on_wakeup( block_spin_lock & this, size_t /*recursion*/ ) with( this ) {
799 // now we acquire the entire block_spin_lock upon waking up
800 while ( __atomic_load_n( &held, __ATOMIC_SEQ_CST ) ) Pause();
801 __atomic_store_n( &held, true, __ATOMIC_RELEASE );
802 unlock( lock ); // now we release the internal fast_block_lock
803}
804
805//-----------------------------------------------------------------------------
806// info_thread
807// the info thread is a wrapper around a thread used
808// to store extra data for use in the condition variable
809forall( L & | is_blocking_lock( L ) ) {
810 struct info_thread;
811}
812
813//-----------------------------------------------------------------------------
814// Synchronization Locks
815forall( L & | is_blocking_lock( L ) ) {
816
817 //-----------------------------------------------------------------------------
818 // cond_lock
819
820 // The multi-tool condition variable
821 // - can wait with a timeout, waking on either a signal or the timeout
822 // - can wait without passing a lock
823 // - can have waiters reacquire different locks while waiting on the same cond var
824 // - has a shadow queue storing user-supplied info with each waiter
825 // - can be signalled outside of critical sections with no locks held
826 struct cond_lock {
827 // Spin lock used for mutual exclusion
828 __spinlock_t lock;
829
830 // List of blocked threads
831 dlist( info_thread( L ) ) blocked_threads;
832
833 // Count of current blocked threads
834 int count;
835 };
836
837 void ?{}( cond_lock( L ) & this );
838 void ^?{}( cond_lock( L ) & this );
839
840 bool notify_one( cond_lock( L ) & this );
841 bool notify_all( cond_lock( L ) & this );
842
843 uintptr_t front( cond_lock( L ) & this );
844
845 bool empty ( cond_lock( L ) & this );
846 int counter( cond_lock( L ) & this );
847
848 void wait( cond_lock( L ) & this );
849 void wait( cond_lock( L ) & this, uintptr_t info );
850 bool wait( cond_lock( L ) & this, Duration duration );
851 bool wait( cond_lock( L ) & this, uintptr_t info, Duration duration );
852
853 void wait( cond_lock( L ) & this, L & l );
854 void wait( cond_lock( L ) & this, L & l, uintptr_t info );
855 bool wait( cond_lock( L ) & this, L & l, Duration duration );
856 bool wait( cond_lock( L ) & this, L & l, uintptr_t info, Duration duration );
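
 // Example usage (sketch): pairing the condition with a lock; "ready" is a hypothetical flag
 // protected by the lock. The lock's on_wait/on_wakeup routines determine whether it is
 // released while blocked and reacquired on wakeup.
 //   single_acquisition_lock m;
 //   cond_lock( single_acquisition_lock ) c;
 //   // waiting thread
 //   lock( m );
 //   while ( ! ready ) wait( c, m );      // release m and block; resume when notified
 //   unlock( m );
 //   // signalling thread
 //   lock( m );  ready = true;  notify_one( c );  unlock( m );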
857
858 //-----------------------------------------------------------------------------
859 // fast_cond_var
860
861 // The trimmed and slim condition variable
862 // - no internal lock so you must hold a lock while using this cond var
 863 // - signalling without holding the branded lock is UNSAFE!
 864 // - only allows usage of one lock; the cond var is branded by its first use
865
866 struct fast_cond_var {
867 // List of blocked threads
868 dlist( info_thread( L ) ) blocked_threads;
869 #ifdef __CFA_DEBUG__
870 L * lock_used;
871 #endif
872 };
873
874 void ?{}( fast_cond_var( L ) & this );
875 void ^?{}( fast_cond_var( L ) & this );
876
877 bool notify_one( fast_cond_var( L ) & this );
878 bool notify_all( fast_cond_var( L ) & this );
879
880 uintptr_t front( fast_cond_var( L ) & this );
881 bool empty ( fast_cond_var( L ) & this );
882
883 void wait( fast_cond_var( L ) & this, L & l );
884 void wait( fast_cond_var( L ) & this, L & l, uintptr_t info );
885
886
887 //-----------------------------------------------------------------------------
888 // pthread_cond_var
889 //
890 // - cond var with minimal footprint
 891 // - supports operations needed for pthread cond
892
893 struct pthread_cond_var {
894 dlist( info_thread( L ) ) blocked_threads;
895 __spinlock_t lock;
896 };
897
898 void ?{}( pthread_cond_var( L ) & this );
899 void ^?{}( pthread_cond_var( L ) & this );
900
901 bool notify_one( pthread_cond_var( L ) & this );
902 bool notify_all( pthread_cond_var( L ) & this );
903
904 uintptr_t front( pthread_cond_var( L ) & this );
905 bool empty ( pthread_cond_var( L ) & this );
906
907 void wait( pthread_cond_var( L ) & this, L & l );
908 void wait( pthread_cond_var( L ) & this, L & l, uintptr_t info );
909 bool wait( pthread_cond_var( L ) & this, L & l, timespec t );
910 bool wait( pthread_cond_var( L ) & this, L & l, uintptr_t info, timespec t );
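
 // Example usage (sketch): timed wait for pthread-style condition variables; "abstime" is a
 // hypothetical timespec deadline computed by the caller (assumed absolute, as with
 // pthread_cond_timedwait). The bool result distinguishes being signalled from timing out.
 //   single_acquisition_lock m;
 //   pthread_cond_var( single_acquisition_lock ) c;
 //   lock( m );
 //   bool signalled = wait( c, m, abstime );
 //   unlock( m );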
911}