source: libcfa/src/concurrency/locks.hfa @ 5908fb4

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

#include <fstream.hfa>

// futex headers
#include <linux/futex.h>      /* Definition of FUTEX_* constants */
#include <sys/syscall.h>      /* Definition of SYS_* constants */
#include <unistd.h>

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
    // For synchronization locks to use when acquiring
    void on_notify( L &, struct thread$ * );

    // For synchronization locks to use when releasing
    size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

    // To set the recursion count after getting signalled
    void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    pp_fn( pp_datum );
    park();
}

// macros for default routine impls for the is_blocking_lock trait that do not wait-morph

#define DEFAULT_ON_NOTIFY( lock_type ) \
    static inline void on_notify( lock_type & this, thread$ * t ) { unpark(t); }

#define DEFAULT_ON_WAIT( lock_type ) \
    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
        unlock( this ); \
        pre_park_then_park( pp_fn, pp_datum ); \
        return 0; \
    }

// on_wakeup impl if lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }

// on_wakeup impl if lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) {}

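// Example (illustrative sketch, not part of this header): a hypothetical user-defined lock can
// satisfy is_blocking_lock by providing lock()/unlock() and reusing the default,
// non-wait-morphing implementations above.
//
//   struct my_tas_lock { volatile bool held; };
//   static inline void ?{}( my_tas_lock & this ) { this.held = false; }
//   static inline void lock( my_tas_lock & this ) {
//       while ( __atomic_test_and_set( &this.held, __ATOMIC_ACQUIRE ) ) Pause();
//   }
//   static inline void unlock( my_tas_lock & this ) { __atomic_clear( &this.held, __ATOMIC_RELEASE ); }
//   DEFAULT_ON_NOTIFY( my_tas_lock )        // on_notify: unpark the signalled thread
//   DEFAULT_ON_WAIT( my_tas_lock )          // on_wait: unlock, run the pre-park hook, park
//   DEFAULT_ON_WAKEUP_REACQ( my_tas_lock )  // on_wakeup: reacquire the lock after waking
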
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
    __spinlock_t lock;
    int count;
    __queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

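// Example usage (sketch): with the default count of 1 the semaphore acts as a simple mutex;
// constructing it with a count of 0 turns it into a signalling primitive.
//
//   semaphore sem;                 // default count = 1
//   int shared_total = 0;
//   void add( int amount ) {
//       P( sem );                  // blocks the calling thread while the count is 0
//       shared_total += amount;
//       V( sem );                  // bumps the count / wakes one waiter
//   }
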
//----------
struct single_acquisition_lock {
    inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock( single_acquisition_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( single_acquisition_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

//----------
struct owner_lock {
    inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock( owner_lock & this ) { lock( (blocking_lock &)this ); }
static inline bool try_lock( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock( owner_lock & this ) { unlock( (blocking_lock &)this ); }
static inline size_t on_wait( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

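// Example usage (sketch): single_acquisition_lock and owner_lock share the blocking_lock
// implementation; the difference is that owner_lock records its owner and lets that owner
// nest acquisitions.
//
//   owner_lock ol;
//   void inner() {
//       lock( ol );                // same owner: recursion count goes to 2
//       unlock( ol );
//   }
//   void outer() {
//       lock( ol );
//       inner();                   // with single_acquisition_lock this nesting would deadlock
//       unlock( ol );              // released when the count returns to 0
//   }
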
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
    mcs_node * volatile next;
    single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
    return node->next;
}

struct mcs_lock {
    mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
    if(push(l.queue, &n))
        wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
    mcs_node * next = advance(l.queue, &n);
    if(next) post(next->sem);
}

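// Example usage (sketch): each acquisition supplies its own queue node, and the same node
// must be passed to the matching unlock; waiters block on the node's semaphore.
//
//   mcs_lock ml;
//   void critical() {
//       mcs_node n;                // per-acquisition node, typically on the caller's stack
//       lock( ml, n );
//       /* ... critical section ... */
//       unlock( ml, n );
//   }
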
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
    mcs_spin_node * volatile next;
    volatile bool locked;
};

struct mcs_spin_queue {
    mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
    return node->next;
}

struct mcs_spin_lock {
    mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
    n.locked = true;
    if(prev == 0p) return;
    prev->next = &n;
    while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * n_ptr = &n;
    if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
    while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
    n.next->locked = false;
}

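// Example usage (sketch): same node-passing protocol as mcs_lock, but each waiter spins on
// its own node's locked flag instead of blocking on a semaphore.
//
//   mcs_spin_lock msl;
//   void critical() {
//       mcs_spin_node n;
//       lock( msl, n );
//       /* ... critical section ... */
//       unlock( msl, n );
//   }
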
//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
// - No reacq on wakeup
struct futex_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

    // stores a lock state
    int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val ) with(this) {
    return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange( futex_mutex & this ) with(this) {
    return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with(this) {
    int state;

    for( int spin = 4; spin < 1024; spin += spin) {
        state = 0;
        // if unlocked, lock and return
        if (internal_try_lock(this, state)) return;
        if (2 == state) break;
        for (int i = 0; i < spin; i++) Pause();
    }

    // if not in contended state, set to be in contended state
    if (state != 2) state = internal_exchange(this);

    // block and spin until we win the lock
    while (state != 0) {
        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
        state = internal_exchange(this);
    }
}

static inline void unlock(futex_mutex & this) with(this) {
    // if uncontended do atomic unlock and then return
    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

    // otherwise threads are blocked so we must wake one
    futex((int *)&val, FUTEX_WAKE, 1);
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )

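// Example usage (sketch): futex_mutex is a plain, non-recursive, unowned mutex; the
// uncontended path stays in user space and contended waiters block in the kernel. The thread
// type below (Incrementer) is purely illustrative.
//
//   futex_mutex fm;
//   volatile int counter = 0;
//   thread Incrementer {};
//   void main( Incrementer & this ) {
//       for ( 1000 ) {             // CFA constant-iteration loop
//           lock( fm );            // never acquire recursively: it will deadlock
//           counter = counter + 1;
//           unlock( fm );
//       }
//   }
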
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

    // stores a lock state
    int val;
};
static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
    return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
    return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
    int state, init_state;

    // speculative grab
    state = internal_exchange(this, 1);
    if ( !state ) return; // state == 0
    init_state = state;
    for (;;) {
        for( int i = 0; i < 4; i++ ) {
            while( !val ) { // lock unlocked
                state = 0;
                if ( internal_try_lock( this, state, init_state ) ) return;
            }
            for (int i = 0; i < 30; i++) Pause();
        }

        while( !val ) { // lock unlocked
            state = 0;
            if ( internal_try_lock( this, state, init_state ) ) return;
        }
        sched_yield();

        // if not in contended state, set to be in contended state
        state = internal_exchange( this, 2 );
        if ( !state ) return; // state == 0
        init_state = 2;
        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
    }
}

static inline void unlock( go_mutex & this ) with(this) {
    // if uncontended do atomic unlock and then return
    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

    // otherwise threads are blocked so we must wake one
    futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
    // Spin lock used for mutual exclusion
    __spinlock_t spinlock;

    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Used for comparing and exchanging
    volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
    this.spinlock{};
    this.blocked_threads{};
    this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void ^?{}( exp_backoff_then_block_lock & this ) {}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
    return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
    return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park( );
    return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
    size_t compare_val = 0;
    int spin = 4;

    // exponential backoff: the spin count doubles each round, capped at 1024
    for( ;; ) {
        compare_val = 0;
        if (internal_try_lock(this, compare_val)) return;
        if (2 == compare_val) break;
        for (int i = 0; i < spin; i++) Pause();
        if (spin >= 1024) break;
        spin += spin;
    }

    if(2 != compare_val && try_lock_contention(this)) return;
    // block until signalled
    while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

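// Example usage (sketch): because on_wakeup reacquires the lock, exp_backoff_then_block_lock
// (unlike futex_mutex) is a reasonable lock to hand to the condition variables declared at
// the end of this file.
//
//   exp_backoff_then_block_lock ebl;
//   condition_variable( exp_backoff_then_block_lock ) cv;
//   bool ready = false;
//   void waiter() {
//       lock( ebl );
//       while ( ! ready ) wait( cv, ebl );  // ebl is released while parked, reacquired on wakeup
//       unlock( ebl );
//   }
//   void signaller() {
//       lock( ebl );
//       ready = true;
//       notify_one( cv );
//       unlock( ebl );
//   }
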
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // flag showing if lock is held
    bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( held ) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park( );
        return;
    }
    held = true;
    unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
    thread$ * t = &try_pop_front( blocked_threads );
    held = ( t ? true : false );
    unpark( t );
    unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    insert_last( blocked_threads, *t );
    unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

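// Example usage (sketch): a minimal FIFO blocking mutex, intended for short critical sections
// (it also serves as the internal lock of block_spin_lock below); it is neither recursive nor
// owner-checked.
//
//   fast_block_lock fbl;
//   void update( int * cell, int v ) {
//       lock( fbl );               // recursive acquisition deadlocks
//       *cell = v;
//       unlock( fbl );
//   }
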
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
    // List of blocked threads
    dlist( select_node ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // owner thread; non-null when the lock is held
    struct thread$ * owner;

    size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    owner = 0p;
    recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with(this) {
    if ( owner == active_thread() ) {
        recursion_count++;
        return;
    }
    lock( lock __cfaabi_dbg_ctx2 );

    if ( owner != 0p ) {
        select_node node;
        insert_last( blocked_threads, node );
        unlock( lock );
        park( );
        return;
    }
    owner = active_thread();
    recursion_count = 1;
    unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with(this) {
    __handle_waituntil_OR( blocked_threads );
    select_node * node = &try_pop_front( blocked_threads );
    if ( node ) {
        owner = node->blocked_thread;
        recursion_count = 1;
        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
        wake_one( blocked_threads, *node );
    } else {
        owner = 0p;
        recursion_count = 0;
    }
}

static inline void unlock( simple_owner_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
    // decrement the recursion count; once it reaches zero release the lock and set a new owner if one is waiting
    recursion_count--;
    if ( recursion_count == 0 ) {
        pop_node( this );
    }
    unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    // lock held
    if ( owner != 0p ) {
        insert_last( blocked_threads, *(select_node *)t->link_node );
    }
    // lock not held
    else {
        owner = t;
        recursion_count = 1;
        unpark( t );
    }
    unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

    size_t ret = recursion_count;

    pop_node( this );

    select_node node;
    active_thread()->link_node = (void *)&node;
    unlock( lock );

    pre_park_then_park( pp_fn, pp_datum );

    return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    // check if we can complete operation. If so race to establish winner in special OR case
    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
            unlock( lock );
            return false;
        }
    }

    if ( owner == active_thread() ) {
        recursion_count++;
        if ( node.park_counter ) __make_select_node_available( node );
        unlock( lock );
        return true;
    }

    if ( owner != 0p ) {
        insert_last( blocked_threads, node );
        unlock( lock );
        return false;
    }

    owner = active_thread();
    recursion_count = 1;

    if ( node.park_counter ) __make_select_node_available( node );
    unlock( lock );
    return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( node`isListed ) {
        remove( node );
        unlock( lock );
        return false;
    }

    if ( owner == active_thread() ) {
        recursion_count--;
        if ( recursion_count == 0 ) {
            pop_node( this );
        }
    }
    unlock( lock );
    return false;
}

static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }

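// Example usage (sketch): simple_owner_lock is the closest analogue to a recursive pthread
// mutex; the owner may nest acquisitions, and pthread_cond_var (declared at the end of this
// file) restores the saved recursion count after a wait.
//
//   simple_owner_lock sol;
//   void helper() {
//       lock( sol );               // same owner: recursion_count becomes 2
//       unlock( sol );             // back to 1, lock still held
//   }
//   void task() {
//       lock( sol );               // recursion_count becomes 1
//       helper();
//       unlock( sol );             // count reaches 0: next waiter (if any) becomes owner
//   }
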
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking / read-modify-write atomics in unlock (just a release store)
struct spin_queue_lock {
    // Spin lock used for mutual exclusion
    mcs_spin_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock( spin_queue_lock & this ) with(this) {
    mcs_spin_node node;
    lock( lock, node );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
    unlock( lock, node );
}

static inline void unlock( spin_queue_lock & this ) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

DEFAULT_ON_NOTIFY( spin_queue_lock )
DEFAULT_ON_WAIT( spin_queue_lock )
DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

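// Example usage (sketch): the interface and caveats match the other mutex-style locks; the
// internal MCS queue in lock() provides FIFO ordering, while unlock() is a single release store.
//
//   spin_queue_lock sql;
//   void tick( volatile int & n ) {
//       lock( sql );               // not recursive; waiters spin, so keep the critical section short
//       n = n + 1;
//       unlock( sql );
//   }
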
//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - The thread at the head of the queue spins; later waiters block (like spin_queue_lock, but blocking for non-head threads)
struct mcs_block_spin_lock {
    // Spin lock used for mutual exclusion
    mcs_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with(this) {
    mcs_node node;
    lock( lock, node );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
    unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - The thread holding the internal lock spins; later waiters block (like spin_queue_lock, but blocking for non-head threads)
struct block_spin_lock {
    // Spin lock used for mutual exclusion
    fast_block_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with(this) {
    lock( lock );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_RELEASE);
    unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
    // first we acquire internal fast_block_lock
    lock( lock __cfaabi_dbg_ctx2 );
    if ( held ) { // if internal fast_block_lock is held
        insert_last( blocked_threads, *t );
        unlock( lock );
        return;
    }
    // if internal fast_block_lock is not held
    held = true;
    unlock( lock );

    unpark(t);
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
    // now we acquire the entire block_spin_lock upon waking up
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_RELEASE);
    unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// // info_thread
// // the info thread is a wrapper around a thread used
// // to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
    struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

    //-----------------------------------------------------------------------------
    // condition_variable

    // The multi-tool condition variable
    // - can pass timeouts to wait for either a signal or timeout
    // - can wait without passing a lock
    // - can have waiters reacquire different locks while waiting on the same cond var
    // - has shadow queue
    // - can be signalled outside of critical sections with no locks held
    struct condition_variable {
        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;

        // Count of current blocked threads
        int count;
    };

    void ?{}( condition_variable(L) & this );
    void ^?{}( condition_variable(L) & this );

    bool notify_one( condition_variable(L) & this );
    bool notify_all( condition_variable(L) & this );

    uintptr_t front( condition_variable(L) & this );

    bool empty  ( condition_variable(L) & this );
    int  counter( condition_variable(L) & this );

    void wait( condition_variable(L) & this );
    void wait( condition_variable(L) & this, uintptr_t info );
    bool wait( condition_variable(L) & this, Duration duration );
    bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

    void wait( condition_variable(L) & this, L & l );
    void wait( condition_variable(L) & this, L & l, uintptr_t info );
    bool wait( condition_variable(L) & this, L & l, Duration duration );
    bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

    //-----------------------------------------------------------------------------
    // fast_cond_var

    // The trimmed and slim condition variable
    // - no internal lock so you must hold a lock while using this cond var
    // - signalling without holding the branded lock is UNSAFE!
    // - only allows usage of one lock; the cond var is branded after first usage

    struct fast_cond_var {
        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;
        #ifdef __CFA_DEBUG__
        L * lock_used;
        #endif
    };

    void ?{}( fast_cond_var(L) & this );
    void ^?{}( fast_cond_var(L) & this );

    bool notify_one( fast_cond_var(L) & this );
    bool notify_all( fast_cond_var(L) & this );

    uintptr_t front( fast_cond_var(L) & this );
    bool empty ( fast_cond_var(L) & this );

    void wait( fast_cond_var(L) & this, L & l );
    void wait( fast_cond_var(L) & this, L & l, uintptr_t info );

    //-----------------------------------------------------------------------------
    // pthread_cond_var
    //
    // - cond var with minimal footprint
    // - supports operations needed for pthread cond

    struct pthread_cond_var {
        dlist( info_thread(L) ) blocked_threads;
        __spinlock_t lock;
    };

    void ?{}( pthread_cond_var(L) & this );
    void ^?{}( pthread_cond_var(L) & this );

    bool notify_one( pthread_cond_var(L) & this );
    bool notify_all( pthread_cond_var(L) & this );

    uintptr_t front( pthread_cond_var(L) & this );
    bool empty ( pthread_cond_var(L) & this );

    void wait( pthread_cond_var(L) & this, L & l );
    void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
    bool wait( pthread_cond_var(L) & this, L & l, timespec t );
    bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}
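
// Example usage (sketch): the general condition_variable paired with a single_acquisition_lock
// in the usual monitor style; wait( cv, l ) releases the lock while blocked and reacquires it
// before returning, and the Duration overloads return false on timeout.
//
//   single_acquisition_lock m;
//   condition_variable( single_acquisition_lock ) not_zero;
//   int items = 0;
//
//   void produce() {
//       lock( m );
//       items += 1;
//       notify_one( not_zero );    // legal with or without holding m
//       unlock( m );
//   }
//   void consume() {
//       lock( m );
//       while ( items == 0 ) wait( not_zero, m );
//       items -= 1;
//       unlock( m );
//   }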