source: libcfa/src/concurrency/locks.hfa@ bccd70a

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include "select.hfa"

#include <fstream.hfa>

// futex headers
#include <linux/futex.h>  /* Definition of FUTEX_* constants */
#include <sys/syscall.h>  /* Definition of SYS_* constants */
#include <unistd.h>

typedef void (*__cfa_pre_park)( void * );

static inline void pre_park_noop( void * ) {}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
    // For synchronization locks to use when acquiring
    void on_notify( L &, struct thread$ * );

    // For synchronization locks to use when releasing
    size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );

    // To set the recursion count after getting signalled
    void on_wakeup( L &, size_t recursion );
};

static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    pp_fn( pp_datum );
    park();
}

// Macros providing default routine implementations of the is_blocking_lock trait for locks that do not wait-morph.

#define DEFAULT_ON_NOTIFY( lock_type ) \
    static inline void on_notify( lock_type & this, thread$ * t ){ unpark(t); }

#define DEFAULT_ON_WAIT( lock_type ) \
    static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
        unlock( this ); \
        pre_park_then_park( pp_fn, pp_datum ); \
        return 0; \
    }

// on_wakeup impl if the lock should be reacquired after waking up
#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) { lock( this ); }

// on_wakeup impl if the lock will not be reacquired after waking up
#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    static inline void on_wakeup( lock_type & this, size_t recursion ) {}

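// Usage sketch (illustration only): a lock type that already provides lock()
// and unlock() can satisfy is_blocking_lock without wait-morphing by stamping
// out these defaults; `my_lock` below is a hypothetical type, not part of this API.
//   DEFAULT_ON_NOTIFY( my_lock )
//   DEFAULT_ON_WAIT( my_lock )
//   DEFAULT_ON_WAKEUP_REACQ( my_lock )
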
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
    __spinlock_t lock;
    int count;
    __queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

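// Usage sketch (illustration only): P() blocks the calling thread while the
// count is zero; V() increments the count and unblocks one waiter if present.
//   semaphore avail;   // count defaults to 1
//   P( avail );        // decrement; blocks when the count reaches 0
//   // ... protected work ...
//   V( avail );        // increment; unblocks one waiter if any
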
//----------
struct single_acquisition_lock {
    inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

//----------
struct owner_lock {
    inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }

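// Usage sketch (illustration only): both locks are thin wrappers around
// blocking_lock; the difference is that owner_lock tracks ownership and allows
// recursive acquisition by its owner, while single_acquisition_lock does not
// and is not safe to acquire twice from the same thread.
//   owner_lock m;
//   lock( m );
//   lock( m );      // ok: recursive acquire by the owning thread
//   unlock( m );
//   unlock( m );    // released once the acquisition count returns to zero
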
//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
    mcs_node * volatile next;
    single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
    return node->next;
}

struct mcs_lock {
    mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
    if(push(l.queue, &n))
        wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
    mcs_node * next = advance(l.queue, &n);
    if(next) post(next->sem);
}

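// Usage sketch (illustration only): unlike the other locks in this file, the
// MCS interfaces take a caller-supplied node, which must stay alive (typically
// on the stack) from lock() until the matching unlock().
//   mcs_lock l;
//   void critical_section() {
//       mcs_node n;          // one node per acquisition, owned by this thread
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );
//   }
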
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
    mcs_spin_node * volatile next;
    volatile bool locked;
};

struct mcs_spin_queue {
    mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

struct mcs_spin_lock {
    mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
    n.locked = true;
    mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
    if( prev == 0p ) return;
    prev->next = &n;
    while( __atomic_load_n(&n.locked, __ATOMIC_RELAXED) ) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
    mcs_spin_node * n_ptr = &n;
    if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
    while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) Pause();
    n.next->locked = false;
}

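// Usage sketch (illustration only): same caller-supplied node pattern as
// mcs_lock above, but waiters spin on their own node instead of blocking, so
// this variant is intended for short critical sections.
//   mcs_spin_lock l;
//   mcs_spin_node n;
//   lock( l, n );
//   // ... short critical section ...
//   unlock( l, n );
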
//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquire)
// - No reacquire on wakeup
struct futex_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

    // stores a lock state
    int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
    return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock( futex_mutex & this, int & compare_val) with(this) {
    return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange( futex_mutex & this ) with(this) {
    return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( futex_mutex & this ) with(this) {
    int state;

    for( int spin = 4; spin < 1024; spin += spin) {
        state = 0;
        // if unlocked, lock and return
        if (internal_try_lock(this, state)) return;
        if (2 == state) break;
        for (int i = 0; i < spin; i++) Pause();
    }

    // if not in contended state, set to be in contended state
    if (state != 2) state = internal_exchange(this);

    // block and spin until we win the lock
    while (state != 0) {
        futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
        state = internal_exchange(this);
    }
}

static inline void unlock(futex_mutex & this) with(this) {
    // if uncontended do atomic unlock and then return
    if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

    // otherwise threads are blocked so we must wake one
    futex((int *)&val, FUTEX_WAKE, 1);
}

DEFAULT_ON_NOTIFY( futex_mutex )
DEFAULT_ON_WAIT( futex_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( futex_mutex )
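// Behaviour sketch (a reading of the code above, not additional API): val moves
// through 0 (unlocked), 1 (locked, uncontended) and 2 (locked, contended).
// lock() spins briefly, then parks the kernel thread with FUTEX_WAIT while
// val == 2; unlock() only issues FUTEX_WAKE when the previous value was not 1.
//   futex_mutex m;
//   lock( m );        // must not be called again by the same thread
//   // ... critical section ...
//   unlock( m );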

//-----------------------------------------------------------------------------
// go_mutex

// - Kernel-thread blocking alternative to the spinlock
// - No ownership (will deadlock on reacquire)
// - Golang's flavour of mutex
// - Implementation taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
    // lock state: any state other than UNLOCKED is locked
    // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

    // stores a lock state
    int val;
};
static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
// static inline void ?{}( go_mutex & this, go_mutex this2 ) = void; // these don't compile correctly at the moment so they should be omitted
// static inline void ?=?( go_mutex & this, go_mutex this2 ) = void;

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
    return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
    return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( go_mutex & this ) with( this ) {
    int state, init_state;

    // speculative grab
    state = internal_exchange(this, 1);
    if ( !state ) return; // state == 0
    init_state = state;
    for (;;) {
        for( int i = 0; i < 4; i++ ) {
            while( !val ) { // lock unlocked
                state = 0;
                if ( internal_try_lock( this, state, init_state ) ) return;
            }
            for (int i = 0; i < 30; i++) Pause();
        }

        while( !val ) { // lock unlocked
            state = 0;
            if ( internal_try_lock( this, state, init_state ) ) return;
        }
        sched_yield();

        // if not in contended state, set to be in contended state
        state = internal_exchange( this, 2 );
        if ( !state ) return; // state == 0
        init_state = 2;
        futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
    }
}

static inline void unlock( go_mutex & this ) with(this) {
    // if uncontended do atomic unlock and then return
    if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;

    // otherwise threads are blocked so we must wake one
    futex( (int *)&val, FUTEX_WAKE, 1 );
}

DEFAULT_ON_NOTIFY( go_mutex )
DEFAULT_ON_WAIT( go_mutex )
DEFAULT_ON_WAKEUP_NO_REACQ( go_mutex )

//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
    // Spin lock used for mutual exclusion
    __spinlock_t spinlock;

    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Used for comparing and exchanging
    volatile size_t lock_value;
};

static inline void ?{}( exp_backoff_then_block_lock & this ) {
    this.spinlock{};
    this.blocked_threads{};
    this.lock_value = 0;
}
static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

static inline void ^?{}( exp_backoff_then_block_lock & this ){}

static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
    return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }

static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
    return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
}

static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
    lock( spinlock __cfaabi_dbg_ctx2 );
    if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
        unlock( spinlock );
        return true;
    }
    insert_last( blocked_threads, *active_thread() );
    unlock( spinlock );
    park( );
    return true;
}

static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
    size_t compare_val = 0;
    int spin = 4;

    // exponential backoff
    for( ;; ) {
        compare_val = 0;
        if (internal_try_lock(this, compare_val)) return;
        if (2 == compare_val) break;
        for (int i = 0; i < spin; i++) Pause();
        if (spin >= 1024) break;
        spin += spin;
    }

    if(2 != compare_val && try_lock_contention(this)) return;
    // block until signalled
    while (block(this)) if(try_lock_contention(this)) return;
}
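// Note (derived from the loop above, not additional API): the spin budget
// doubles from 4 to 1024 pauses per round, roughly 2K Pause() calls in total,
// before the acquiring thread gives up spinning and blocks on the internal list.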

static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    lock( spinlock __cfaabi_dbg_ctx2 );
    thread$ * t = &try_pop_front( blocked_threads );
    unlock( spinlock );
    unpark( t );
}

DEFAULT_ON_NOTIFY( exp_backoff_then_block_lock )
DEFAULT_ON_WAIT( exp_backoff_then_block_lock )
DEFAULT_ON_WAKEUP_REACQ( exp_backoff_then_block_lock )

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
    // List of blocked threads
    dlist( thread$ ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // flag showing if lock is held
    bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( fast_block_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( held ) {
        insert_last( blocked_threads, *active_thread() );
        unlock( lock );
        park( );
        return;
    }
    held = true;
    unlock( lock );
}

static inline void unlock( fast_block_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
    thread$ * t = &try_pop_front( blocked_threads );
    held = ( t ? true : false );
    unpark( t );
    unlock( lock );
}

static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    insert_last( blocked_threads, *t );
    unlock( lock );
}
DEFAULT_ON_WAIT( fast_block_lock )
DEFAULT_ON_WAKEUP_NO_REACQ( fast_block_lock )

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread-style owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
    // List of blocked threads
    dlist( select_node ) blocked_threads;

    // Spin lock used for mutual exclusion
    __spinlock_t lock;

    // thread that currently owns the lock (0p when not held)
    struct thread$ * owner;

    size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
    lock{};
    blocked_threads{};
    owner = 0p;
    recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock( simple_owner_lock & this ) with(this) {
    if ( owner == active_thread() ) {
        recursion_count++;
        return;
    }
    lock( lock __cfaabi_dbg_ctx2 );

    if ( owner != 0p ) {
        select_node node;
        insert_last( blocked_threads, node );
        unlock( lock );
        park( );
        return;
    }
    owner = active_thread();
    recursion_count = 1;
    unlock( lock );
}

static inline void pop_node( simple_owner_lock & this ) with(this) {
    __handle_waituntil_OR( blocked_threads );
    select_node * node = &try_pop_front( blocked_threads );
    if ( node ) {
        owner = node->blocked_thread;
        recursion_count = 1;
        // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
        wake_one( blocked_threads, *node );
    } else {
        owner = 0p;
        recursion_count = 0;
    }
}

static inline void unlock( simple_owner_lock & this ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
    // if the recursion count reaches zero, release the lock and set a new owner if one is waiting
    recursion_count--;
    if ( recursion_count == 0 ) {
        pop_node( this );
    }
    unlock( lock );
}

static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    // lock held
    if ( owner != 0p ) {
        insert_last( blocked_threads, *(select_node *)t->link_node );
    }
    // lock not held
    else {
        owner = t;
        recursion_count = 1;
        unpark( t );
    }
    unlock( lock );
}

static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
    /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

    size_t ret = recursion_count;

    pop_node( this );

    select_node node;
    active_thread()->link_node = (void *)&node;
    unlock( lock );

    pre_park_then_park( pp_fn, pp_datum );

    return ret;
}

static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

// waituntil() support
static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );

    // check if we can complete the operation; if so, race to establish the winner in the special OR case
    if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
        if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
            unlock( lock );
            return false;
        }
    }

    if ( owner == active_thread() ) {
        recursion_count++;
        if ( node.park_counter ) __make_select_node_available( node );
        unlock( lock );
        return true;
    }

    if ( owner != 0p ) {
        insert_last( blocked_threads, node );
        unlock( lock );
        return false;
    }

    owner = active_thread();
    recursion_count = 1;

    if ( node.park_counter ) __make_select_node_available( node );
    unlock( lock );
    return true;
}

static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
    lock( lock __cfaabi_dbg_ctx2 );
    if ( node`isListed ) {
        remove( node );
        unlock( lock );
        return false;
    }

    if ( owner == active_thread() ) {
        recursion_count--;
        if ( recursion_count == 0 ) {
            pop_node( this );
        }
    }
    unlock( lock );
    return false;
}

static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }

[ae06e0b]625
[f835806]626//-----------------------------------------------------------------------------
627// Spin Queue Lock
628
629// - No reacquire for cond var
630// - No recursive acquisition
631// - No ownership
632// - spin lock with no locking/atomics in unlock
633struct spin_queue_lock {
634 // Spin lock used for mutual exclusion
635 mcs_spin_lock lock;
636
637 // flag showing if lock is held
[db7a3ad]638 volatile bool held;
[f835806]639};
640
641static inline void ?{}( spin_queue_lock & this ) with(this) {
642 lock{};
643 held = false;
644}
645static inline void ^?{}( spin_queue_lock & this ) {}
646static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
647static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
648
[378de69]649// if this is called recursively IT WILL DEADLOCK!
[beeff61e]650static inline void lock( spin_queue_lock & this ) with(this) {
[f835806]651 mcs_spin_node node;
652 lock( lock, node );
[df932552]653 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
654 __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
[f835806]655 unlock( lock, node );
656}
657
[beeff61e]658static inline void unlock( spin_queue_lock & this ) with(this) {
[2ed32fa7]659 __atomic_store_n(&held, false, __ATOMIC_RELEASE);
[f835806]660}
661
[5a05946]662DEFAULT_ON_NOTIFY( spin_queue_lock )
663DEFAULT_ON_WAIT( spin_queue_lock )
664DEFAULT_ON_WAKEUP_REACQ( spin_queue_lock )

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but threads other than the first block instead of spinning)
struct mcs_block_spin_lock {
    // Spin lock used for mutual exclusion
    mcs_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( mcs_block_spin_lock & this ) with(this) {
    mcs_node node;
    lock( lock, node );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
    unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

DEFAULT_ON_NOTIFY( mcs_block_spin_lock )
DEFAULT_ON_WAIT( mcs_block_spin_lock )
DEFAULT_ON_WAKEUP_REACQ( mcs_block_spin_lock )

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but threads other than the first block instead of spinning)
struct block_spin_lock {
    // Spin lock used for mutual exclusion
    fast_block_lock lock;

    // flag showing if lock is held
    volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
    lock{};
    held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock( block_spin_lock & this ) with(this) {
    lock( lock );
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_RELEASE);
    unlock( lock );
}

static inline void unlock( block_spin_lock & this ) with(this) {
    __atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
    // first we acquire the internal fast_block_lock
    lock( lock __cfaabi_dbg_ctx2 );
    if ( held ) { // if the internal fast_block_lock is held
        insert_last( blocked_threads, *t );
        unlock( lock );
        return;
    }
    // if the internal fast_block_lock is not held
    held = true;
    unlock( lock );

    unpark(t);
}
DEFAULT_ON_WAIT( block_spin_lock )
static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
    // now we acquire the entire block_spin_lock upon waking up
    while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
    __atomic_store_n(&held, true, __ATOMIC_RELEASE);
    unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
    struct info_thread;
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

    //-----------------------------------------------------------------------------
    // condition_variable

    // The multi-tool condition variable
    // - can wait with a timeout, waking on either a signal or the timeout
    // - can wait without passing a lock
    // - can have waiters reacquire different locks while waiting on the same cond var
    // - has shadow queue
    // - can be signalled outside of critical sections with no locks held
    struct condition_variable {
        // Spin lock used for mutual exclusion
        __spinlock_t lock;

        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;

        // Count of current blocked threads
        int count;
    };

    void  ?{}( condition_variable(L) & this );
    void ^?{}( condition_variable(L) & this );

    bool notify_one( condition_variable(L) & this );
    bool notify_all( condition_variable(L) & this );

    uintptr_t front( condition_variable(L) & this );

    bool empty  ( condition_variable(L) & this );
    int counter( condition_variable(L) & this );

    void wait( condition_variable(L) & this );
    void wait( condition_variable(L) & this, uintptr_t info );
    bool wait( condition_variable(L) & this, Duration duration );
    bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

    void wait( condition_variable(L) & this, L & l );
    void wait( condition_variable(L) & this, L & l, uintptr_t info );
    bool wait( condition_variable(L) & this, L & l, Duration duration );
    bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

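    // Usage sketch (illustration only): the condition variable can be paired
    // with any lock satisfying is_blocking_lock, e.g. a single_acquisition_lock.
    //   single_acquisition_lock m;
    //   condition_variable( single_acquisition_lock ) c;
    //   // waiter:                          // signaller:
    //   //   lock( m );                     //   notify_one( c );  // no lock needed
    //   //   wait( c, m );  // releases m while blocked, reacquires before returning
    //   //   unlock( m );
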
    //-----------------------------------------------------------------------------
    // fast_cond_var

    // The trimmed and slim condition variable
    // - no internal lock, so you must hold a lock while using this cond var
    // - signalling without holding the branded lock is UNSAFE!
    // - only allows usage of one lock: the cond var is branded to the first lock used with it

    struct fast_cond_var {
        // List of blocked threads
        dlist( info_thread(L) ) blocked_threads;
        #ifdef __CFA_DEBUG__
        L * lock_used;
        #endif
    };

    void  ?{}( fast_cond_var(L) & this );
    void ^?{}( fast_cond_var(L) & this );

    bool notify_one( fast_cond_var(L) & this );
    bool notify_all( fast_cond_var(L) & this );

    uintptr_t front( fast_cond_var(L) & this );
    bool empty ( fast_cond_var(L) & this );

    void wait( fast_cond_var(L) & this, L & l );
    void wait( fast_cond_var(L) & this, L & l, uintptr_t info );

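    // Usage sketch (illustration only): unlike condition_variable above, the
    // branded lock must be held for BOTH wait and notify.
    //   single_acquisition_lock m;
    //   fast_cond_var( single_acquisition_lock ) c;
    //   // waiter:    lock( m ); wait( c, m ); unlock( m );
    //   // signaller: lock( m ); notify_one( c ); unlock( m );
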
    //-----------------------------------------------------------------------------
    // pthread_cond_var
    //
    // - cond var with minimal footprint
    // - supports the operations needed for pthread cond

    struct pthread_cond_var {
        dlist( info_thread(L) ) blocked_threads;
        __spinlock_t lock;
    };

    void  ?{}( pthread_cond_var(L) & this );
    void ^?{}( pthread_cond_var(L) & this );

    bool notify_one( pthread_cond_var(L) & this );
    bool notify_all( pthread_cond_var(L) & this );

    uintptr_t front( pthread_cond_var(L) & this );
    bool empty ( pthread_cond_var(L) & this );

    void wait( pthread_cond_var(L) & this, L & l );
    void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
    bool wait( pthread_cond_var(L) & this, L & l, timespec t );
    bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
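
    // Usage sketch (illustration only): intended as the backing for a
    // pthread-style condition interface; the timespec overloads report via the
    // bool result whether the wait was signalled or timed out (see locks.cfa
    // for the exact timeout semantics).
    //   owner_lock m;
    //   pthread_cond_var( owner_lock ) c;
    //   // given a timespec t:
    //   //   lock( m ); bool signalled = wait( c, m, t ); unlock( m );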
}