source: libcfa/src/concurrency/locks.hfa@ e2887a9

Last change on this file since e2887a9 was fece3d9, checked in by caparsons <caparson@…>, 2 years ago

Added fix for cond var timeout handling race. Cleanup of locks.hfa/cfa changes is an ongoing TODO

  • Property mode set to 100644
File size: 28.2 KB
1//
2// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// locks.hfa -- PUBLIC
8// Runtime locks that are used with the runtime thread system.
9//
10// Author : Colby Alexander Parsons
11// Created On : Thu Jan 21 19:46:50 2021
12// Last Modified By :
13// Last Modified On :
14// Update Count :
15//
16
17#pragma once
18
19#include <stdbool.h>
20#include <stdio.h>
21
22#include "bits/weakso_locks.hfa"
23#include "containers/lockfree.hfa"
24#include "containers/list.hfa"
25
26#include "limits.hfa"
27#include "thread.hfa"
28
29#include "time_t.hfa"
30#include "time.hfa"
31
32#include "select.hfa"
33
34#include <fstream.hfa>
35
36// futex headers
37#include <linux/futex.h> /* Definition of FUTEX_* constants */
38#include <sys/syscall.h> /* Definition of SYS_* constants */
39#include <unistd.h>
40
41// C_TODO: cleanup this and locks.cfa
42// - appropriate separation of interface and impl
43// - clean up unused/unneeded locks
44// - change messy big blocking lock from inheritance to composition to remove need for flags
45
46typedef void (*__cfa_pre_park)( void * );
47
48static inline void pre_park_noop( void * ) {}
49
50//-----------------------------------------------------------------------------
51// is_blocking_lock
52forall( L & | sized(L) )
53trait is_blocking_lock {
54 // For synchronization locks to use when acquiring
55 void on_notify( L &, struct thread$ * );
56
57 // For synchronization locks to use when releasing
58 size_t on_wait( L &, __cfa_pre_park pp_fn, void * pp_datum );
59
60	// to set the recursion count after being signalled
61 void on_wakeup( L &, size_t recursion );
62};
63
64static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
65 pp_fn( pp_datum );
66 park();
67}
68
69//-----------------------------------------------------------------------------
70// Semaphore
71struct semaphore {
72 __spinlock_t lock;
73 int count;
74 __queue_t(thread$) waiting;
75};
76
77void ?{}(semaphore & this, int count = 1);
78void ^?{}(semaphore & this);
79bool P (semaphore & this);
80bool V (semaphore & this);
81bool V (semaphore & this, unsigned count);
82thread$ * V (semaphore & this, bool );
83
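// Usage sketch (illustrative only, not part of this interface): bounding
// concurrent access to a fixed-size resource pool with the counting semaphore.
// The helper name example_sem_user is hypothetical; the semaphore is assumed
// to have been constructed elsewhere with the pool size, e.g. via ?{}( pool, 4 ).
static inline void example_sem_user( semaphore & pool ) {
	P( pool );	// may block the calling thread until a unit is available
	// ... use one unit of the pooled resource ...
	V( pool );	// release the unit, waking one waiter if any
}
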
84//----------
85struct single_acquisition_lock {
86 inline blocking_lock;
87};
88
89static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
90static inline void ^?{}( single_acquisition_lock & this ) {}
91static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
92static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
93static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
94static inline size_t on_wait ( single_acquisition_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
95static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
96static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
97static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
98static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
99static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
100
101//----------
102struct owner_lock {
103 inline blocking_lock;
104};
105
106static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
107static inline void ^?{}( owner_lock & this ) {}
108static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
109static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
110static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
111static inline size_t on_wait ( owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) { return on_wait ( (blocking_lock &)this, pp_fn, pp_datum ); }
112static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
113static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
114static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
115static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
116static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
117
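// Usage sketch (illustrative only): owner_lock tracks its owner, so the owning
// thread may acquire it recursively. The helper names below are hypothetical.
static inline void example_owner_helper( owner_lock & l ) {
	lock( l );	// nested acquire by the owner just increments the recursion count
	// ... work that needs mutual exclusion ...
	unlock( l );
}
static inline void example_owner_user( owner_lock & l ) {
	lock( l );	// first acquisition
	example_owner_helper( l );	// safe: owner_lock permits recursive acquisition
	unlock( l );	// fully released only after the matching outermost unlock
}
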
118//-----------------------------------------------------------------------------
119// MCS Lock
120struct mcs_node {
121 mcs_node * volatile next;
122 single_sem sem;
123};
124
125static inline void ?{}(mcs_node & this) { this.next = 0p; }
126
127static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
128 return node->next;
129}
130
131struct mcs_lock {
132 mcs_queue(mcs_node) queue;
133};
134
135static inline void lock(mcs_lock & l, mcs_node & n) {
136 if(push(l.queue, &n))
137 wait(n.sem);
138}
139
140static inline void unlock(mcs_lock & l, mcs_node & n) {
141 mcs_node * next = advance(l.queue, &n);
142 if(next) post(next->sem);
143}
144
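// Usage sketch (illustrative only): each acquisition passes its own
// stack-allocated mcs_node, which must remain live until the matching unlock.
// The helper name example_mcs_user is hypothetical.
static inline void example_mcs_user( mcs_lock & l ) {
	mcs_node n;	// per-acquisition queue node
	lock( l, n );	// enqueue n; blocks on n.sem if the lock is already held
	// ... critical section ...
	unlock( l, n );	// hand the lock to the next queued node, if any
}
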
145//-----------------------------------------------------------------------------
146// MCS Spin Lock
147// - No recursive acquisition
148// - Needs to be released by owner
149
150struct mcs_spin_node {
151 mcs_spin_node * volatile next;
152 volatile bool locked;
153};
154
155struct mcs_spin_queue {
156 mcs_spin_node * volatile tail;
157};
158
159static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
160
161static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
162 return node->next;
163}
164
165struct mcs_spin_lock {
166 mcs_spin_queue queue;
167};
168
169static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
170 mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
171 n.locked = true;
172 if(prev == 0p) return;
173 prev->next = &n;
174 while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
175}
176
177static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
178 mcs_spin_node * n_ptr = &n;
179 if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
180 while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
181 n.next->locked = false;
182}
183
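// Usage sketch (illustrative only): the same node-passing protocol as mcs_lock
// above, except waiters spin on their own node rather than blocking.
// The helper name example_mcs_spin_user is hypothetical.
static inline void example_mcs_spin_user( mcs_spin_lock & l ) {
	mcs_spin_node n;	// per-acquisition node; a waiter spins on n.locked
	lock( l, n );
	// ... short critical section (other waiters are busy-spinning) ...
	unlock( l, n );
}
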
184//-----------------------------------------------------------------------------
185// futex_mutex
186
187// - Kernel-thread blocking alternative to the spinlock
188// - No ownership (will deadlock on reacquisition)
189struct futex_mutex {
190	// lock state: any state other than UNLOCKED is locked
191 // enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };
192
193 // stores a lock state
194 int val;
195};
196
197// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
198static inline int futex(int *uaddr, int futex_op, int val) {
199 return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
200}
201
202static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }
203
204static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
205 return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
206}
207
208static inline int internal_exchange(futex_mutex & this) with(this) {
209 return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
210}
211
212// if this is called recursively IT WILL DEADLOCK!!!!!
213static inline void lock( futex_mutex & this ) with(this) {
214 int state;
215
216 for( int spin = 4; spin < 1024; spin += spin) {
217 state = 0;
218 // if unlocked, lock and return
219 if (internal_try_lock(this, state)) return;
220 if (2 == state) break;
221 for (int i = 0; i < spin; i++) Pause();
222 }
223
224 // if not in contended state, set to be in contended state
225 if (state != 2) state = internal_exchange(this);
226
227 // block and spin until we win the lock
228 while (state != 0) {
229 futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
230 state = internal_exchange(this);
231 }
232}
233
234static inline void unlock(futex_mutex & this) with(this) {
235 // if uncontended do atomic unlock and then return
236 if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;
237
238 // otherwise threads are blocked so we must wake one
239 futex((int *)&val, FUTEX_WAKE, 1);
240}
241
242static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
243static inline size_t on_wait( futex_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) {
244 unlock( this );
245 pre_park_then_park( pp_fn, pp_datum );
246 return 0;
247}
248
249// to set the recursion count after being signalled
250static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
251
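// Usage sketch (illustrative only): futex_mutex as a plain mutual-exclusion
// lock; it has no ownership, so it must never be acquired recursively.
// The helper name example_futex_user is hypothetical.
static inline void example_futex_user( futex_mutex & m, volatile int & shared_counter ) {
	lock( m );	// spins briefly, then sleeps in the kernel on the futex if contended
	shared_counter += 1;	// critical section
	unlock( m );	// wakes one sleeping waiter only if the lock was contended
}
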
252//-----------------------------------------------------------------------------
253// go_mutex
254
255// - Kernel-thread blocking alternative to the spinlock
256// - No ownership (will deadlock on reacquisition)
257// - Golang's flavour of mutex
258// - Impl taken from Golang: src/runtime/lock_futex.go
259struct go_mutex {
260	// lock state: any state other than UNLOCKED is locked
261 // enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };
262
263 // stores a lock state
264 int val;
265};
266
267static inline void ?{}( go_mutex & this ) with(this) { val = 0; }
268
269static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
270 return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
271}
272
273static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
274 return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
275}
276
277// if this is called recursively IT WILL DEADLOCK!!!!!
278static inline void lock( go_mutex & this ) with( this ) {
279 int state, init_state;
280
281 // speculative grab
282 state = internal_exchange(this, 1);
283 if ( !state ) return; // state == 0
284 init_state = state;
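	// Acquisition phases (ported from Go's lock_futex.go): spin with Pause()
	// while retrying, then retry once more around a sched_yield(), and finally
	// mark the lock contended (2) and sleep on the futex until woken.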
285 for (;;) {
286 for( int i = 0; i < 4; i++ ) {
287 while( !val ) { // lock unlocked
288 state = 0;
289 if ( internal_try_lock( this, state, init_state ) ) return;
290 }
291 for (int i = 0; i < 30; i++) Pause();
292 }
293
294 while( !val ) { // lock unlocked
295 state = 0;
296 if ( internal_try_lock( this, state, init_state ) ) return;
297 }
298 sched_yield();
299
300 // if not in contended state, set to be in contended state
301 state = internal_exchange( this, 2 );
302 if ( !state ) return; // state == 0
303 init_state = 2;
304 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK
305 }
306}
307
308static inline void unlock( go_mutex & this ) with(this) {
309 // if uncontended do atomic unlock and then return
310 if ( __atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1 ) return;
311
312 // otherwise threads are blocked so we must wake one
313 futex( (int *)&val, FUTEX_WAKE, 1 );
314}
315
316static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); }
317static inline size_t on_wait( go_mutex & this, __cfa_pre_park pp_fn, void * pp_datum ) {
318 unlock( this );
319 pre_park_then_park( pp_fn, pp_datum );
320 return 0;
321}
322static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
323
324//-----------------------------------------------------------------------------
325// Exponential backoff then block lock
326struct exp_backoff_then_block_lock {
327 // Spin lock used for mutual exclusion
328 __spinlock_t spinlock;
329
330 // List of blocked threads
331 dlist( thread$ ) blocked_threads;
332
333 // Used for comparing and exchanging
334 volatile size_t lock_value;
335};
336
337static inline void ?{}( exp_backoff_then_block_lock & this ) {
338 this.spinlock{};
339 this.blocked_threads{};
340 this.lock_value = 0;
341}
342
343static inline void ^?{}( exp_backoff_then_block_lock & this ){}
344
345static inline bool internal_try_lock( exp_backoff_then_block_lock & this, size_t & compare_val ) with(this) {
346 return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
347}
348
349static inline bool try_lock( exp_backoff_then_block_lock & this ) { size_t compare_val = 0; return internal_try_lock( this, compare_val ); }
350
351static inline bool try_lock_contention( exp_backoff_then_block_lock & this ) with(this) {
352 return !__atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE );
353}
354
355static inline bool block( exp_backoff_then_block_lock & this ) with(this) {
356 lock( spinlock __cfaabi_dbg_ctx2 );
357 if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
358 unlock( spinlock );
359 return true;
360 }
361 insert_last( blocked_threads, *active_thread() );
362 unlock( spinlock );
363 park( );
364 return true;
365}
366
367static inline void lock( exp_backoff_then_block_lock & this ) with(this) {
368 size_t compare_val = 0;
369 int spin = 4;
370
371	// exponential backoff
372 for( ;; ) {
373 compare_val = 0;
374 if (internal_try_lock(this, compare_val)) return;
375 if (2 == compare_val) break;
376 for (int i = 0; i < spin; i++) Pause();
377 if (spin >= 1024) break;
378 spin += spin;
379 }
380
381 if(2 != compare_val && try_lock_contention(this)) return;
382 // block until signalled
383 while (block(this)) if(try_lock_contention(this)) return;
384}
385
386static inline void unlock( exp_backoff_then_block_lock & this ) with(this) {
387 if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
388 lock( spinlock __cfaabi_dbg_ctx2 );
389 thread$ * t = &try_pop_front( blocked_threads );
390 unlock( spinlock );
391 unpark( t );
392}
393
394static inline void on_notify( exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark( t ); }
395static inline size_t on_wait( exp_backoff_then_block_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
396 unlock( this );
397 pre_park_then_park( pp_fn, pp_datum );
398 return 0;
399}
400static inline void on_wakeup( exp_backoff_then_block_lock & this, size_t recursion ) { lock( this ); }
401
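// Usage sketch (illustrative only): try_lock() can serve as a non-blocking fast
// path before falling back to the blocking lock(). The helper name is hypothetical.
static inline void example_ebb_user( exp_backoff_then_block_lock & l ) {
	if ( ! try_lock( l ) )	// non-blocking attempt first
		lock( l );	// otherwise spin with exponential backoff, then block
	// ... critical section ...
	unlock( l );
}
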
402//-----------------------------------------------------------------------------
403// Fast Block Lock
404
405// minimal blocking lock
406// - No reacquire for cond var
407// - No recursive acquisition
408// - No ownership
409struct fast_block_lock {
410 // List of blocked threads
411 dlist( thread$ ) blocked_threads;
412
413 // Spin lock used for mutual exclusion
414 __spinlock_t lock;
415
416 // flag showing if lock is held
417 bool held:1;
418};
419
420static inline void ?{}( fast_block_lock & this ) with(this) {
421 lock{};
422 blocked_threads{};
423 held = false;
424}
425static inline void ^?{}( fast_block_lock & this ) {}
426static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
427static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
428
429// if this is called recursively IT WILL DEADLOCK!!!!!
430static inline void lock( fast_block_lock & this ) with(this) {
431 lock( lock __cfaabi_dbg_ctx2 );
432 if ( held ) {
433 insert_last( blocked_threads, *active_thread() );
434 unlock( lock );
435 park( );
436 return;
437 }
438 held = true;
439 unlock( lock );
440}
441
442static inline void unlock( fast_block_lock & this ) with(this) {
443 lock( lock __cfaabi_dbg_ctx2 );
444 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
445 thread$ * t = &try_pop_front( blocked_threads );
446 held = ( t ? true : false );
447 unpark( t );
448 unlock( lock );
449}
450
451static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with(this) {
452 lock( lock __cfaabi_dbg_ctx2 );
453 insert_last( blocked_threads, *t );
454 unlock( lock );
455}
456static inline size_t on_wait( fast_block_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
457 unlock( this );
458 pre_park_then_park( pp_fn, pp_datum );
459 return 0;
460}
461static inline void on_wakeup( fast_block_lock & this, size_t recursion ) { }
462
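// Usage sketch (illustrative only): fast_block_lock is a minimal FIFO blocking
// lock with no ownership and no recursion, intended for short critical sections.
// The helper name example_fbl_user is hypothetical.
static inline void example_fbl_user( fast_block_lock & l, volatile int & shared_counter ) {
	lock( l );	// parks the calling thread if the lock is already held
	shared_counter += 1;	// critical section
	unlock( l );	// passes the lock directly to the first blocked thread, if any
}
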
463//-----------------------------------------------------------------------------
464// simple_owner_lock
465
466// pthread owner lock
467// - reacquire for cond var
468// - recursive acquisition
469// - ownership
470struct simple_owner_lock {
471 // List of blocked threads
472 dlist( select_node ) blocked_threads;
473
474 // Spin lock used for mutual exclusion
475 __spinlock_t lock;
476
477 // owner showing if lock is held
478 struct thread$ * owner;
479
480 size_t recursion_count;
481};
482
483static inline void ?{}( simple_owner_lock & this ) with(this) {
484 lock{};
485 blocked_threads{};
486 owner = 0p;
487 recursion_count = 0;
488}
489static inline void ^?{}( simple_owner_lock & this ) {}
490static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
491static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
492
493static inline void lock( simple_owner_lock & this ) with(this) {
494 if ( owner == active_thread() ) {
495 recursion_count++;
496 return;
497 }
498 lock( lock __cfaabi_dbg_ctx2 );
499
500 if ( owner != 0p ) {
501 select_node node;
502 insert_last( blocked_threads, node );
503 unlock( lock );
504 park( );
505 return;
506 }
507 owner = active_thread();
508 recursion_count = 1;
509 unlock( lock );
510}
511
512static inline void pop_node( simple_owner_lock & this ) with(this) {
513 __handle_waituntil_OR( blocked_threads );
514 select_node * node = &try_pop_front( blocked_threads );
515 if ( node ) {
516 owner = node->blocked_thread;
517 recursion_count = 1;
518 // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
519 wake_one( blocked_threads, *node );
520 } else {
521 owner = 0p;
522 recursion_count = 0;
523 }
524}
525
526static inline void unlock( simple_owner_lock & this ) with(this) {
527 lock( lock __cfaabi_dbg_ctx2 );
528 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
529 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
530	// decrement the recursion count; if it reaches zero, release the lock and set a new owner if one is waiting
531 recursion_count--;
532 if ( recursion_count == 0 ) {
533 pop_node( this );
534 }
535 unlock( lock );
536}
537
538static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
539 lock( lock __cfaabi_dbg_ctx2 );
540 // lock held
541 if ( owner != 0p ) {
542 insert_last( blocked_threads, *(select_node *)t->link_node );
543 }
544 // lock not held
545 else {
546 owner = t;
547 recursion_count = 1;
548 unpark( t );
549 }
550 unlock( lock );
551}
552
553static inline size_t on_wait( simple_owner_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) with(this) {
554 lock( lock __cfaabi_dbg_ctx2 );
555 /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
556 /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
557
558 size_t ret = recursion_count;
559
560 pop_node( this );
561
562 select_node node;
563 active_thread()->link_node = (void *)&node;
564 unlock( lock );
565
566 pre_park_then_park( pp_fn, pp_datum );
567
568 return ret;
569}
570
571static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
572
573// waituntil() support
574static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
575 lock( lock __cfaabi_dbg_ctx2 );
576
577 // check if we can complete operation. If so race to establish winner in special OR case
578 if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
579 if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
580 unlock( lock );
581 return false;
582 }
583 }
584
585 if ( owner == active_thread() ) {
586 recursion_count++;
587 if ( node.park_counter ) __make_select_node_available( node );
588 unlock( lock );
589 return true;
590 }
591
592 if ( owner != 0p ) {
593 insert_last( blocked_threads, node );
594 unlock( lock );
595 return false;
596 }
597
598 owner = active_thread();
599 recursion_count = 1;
600
601 if ( node.park_counter ) __make_select_node_available( node );
602 unlock( lock );
603 return true;
604}
605
606static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
607 lock( lock __cfaabi_dbg_ctx2 );
608 if ( node`isListed ) {
609 remove( node );
610 unlock( lock );
611 return false;
612 }
613
614 if ( owner == active_thread() ) {
615 recursion_count--;
616 if ( recursion_count == 0 ) {
617 pop_node( this );
618 }
619 }
620 unlock( lock );
621 return false;
622}
623
624static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
625
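// Usage sketch (illustrative only): simple_owner_lock records its owner and a
// recursion count, so the owning thread may re-acquire it; the count is saved by
// on_wait() and restored by on_wakeup() when used with the condition variables
// below. The helper name example_sol_user is hypothetical.
static inline void example_sol_user( simple_owner_lock & l ) {
	lock( l );	// recursion_count becomes 1
	lock( l );	// owner re-acquires; recursion_count becomes 2
	// ... critical section ...
	unlock( l );	// recursion_count back to 1
	unlock( l );	// fully released; the next waiter (if any) becomes the owner
}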
626
627//-----------------------------------------------------------------------------
628// Spin Queue Lock
629
630// - No reacquire for cond var
631// - No recursive acquisition
632// - No ownership
633// - spin lock with no locking/atomics in unlock
634struct spin_queue_lock {
635 // Spin lock used for mutual exclusion
636 mcs_spin_lock lock;
637
638 // flag showing if lock is held
639 volatile bool held;
640};
641
642static inline void ?{}( spin_queue_lock & this ) with(this) {
643 lock{};
644 held = false;
645}
646static inline void ^?{}( spin_queue_lock & this ) {}
647static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
648static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
649
650// if this is called recursively IT WILL DEADLOCK!
651static inline void lock( spin_queue_lock & this ) with(this) {
652 mcs_spin_node node;
653 lock( lock, node );
654 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
655 __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
656 unlock( lock, node );
657}
658
659static inline void unlock( spin_queue_lock & this ) with(this) {
660 __atomic_store_n(&held, false, __ATOMIC_RELEASE);
661}
662
663static inline void on_notify( spin_queue_lock & this, struct thread$ * t ) {
664 unpark(t);
665}
666static inline size_t on_wait( spin_queue_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
667 unlock( this );
668 pre_park_then_park( pp_fn, pp_datum );
669 return 0;
670}
671static inline void on_wakeup( spin_queue_lock & this, size_t recursion ) { lock( this ); }
672
673
674//-----------------------------------------------------------------------------
675// MCS Block Spin Lock
676
677// - No reacquire for cond var
678// - No recursive acquisition
679// - No ownership
680// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
681struct mcs_block_spin_lock {
682 // Spin lock used for mutual exclusion
683 mcs_lock lock;
684
685 // flag showing if lock is held
686 volatile bool held;
687};
688
689static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
690 lock{};
691 held = false;
692}
693static inline void ^?{}( mcs_block_spin_lock & this ) {}
694static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
695static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
696
697// if this is called recursively IT WILL DEADLOCK!!!!!
698static inline void lock( mcs_block_spin_lock & this ) with(this) {
699 mcs_node node;
700 lock( lock, node );
701 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
702 __atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
703 unlock( lock, node );
704}
705
706static inline void unlock(mcs_block_spin_lock & this) with(this) {
707 __atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
708}
709
710static inline void on_notify( mcs_block_spin_lock & this, struct thread$ * t ) { unpark( t ); }
711static inline size_t on_wait( mcs_block_spin_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
712 unlock( this );
713 pre_park_then_park( pp_fn, pp_datum );
714 return 0;
715}
716static inline void on_wakeup( mcs_block_spin_lock & this, size_t recursion ) {lock( this ); }
717
718//-----------------------------------------------------------------------------
719// Block Spin Lock
720
721// - No reacquire for cond var
722// - No recursive acquisition
723// - No ownership
724// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
725struct block_spin_lock {
726 // Spin lock used for mutual exclusion
727 fast_block_lock lock;
728
729 // flag showing if lock is held
730 volatile bool held;
731};
732
733static inline void ?{}( block_spin_lock & this ) with(this) {
734 lock{};
735 held = false;
736}
737static inline void ^?{}( block_spin_lock & this ) {}
738static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
739static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
740
741// if this is called recursively IT WILL DEADLOCK!!!!!
742static inline void lock( block_spin_lock & this ) with(this) {
743 lock( lock );
744 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
745 __atomic_store_n(&held, true, __ATOMIC_RELEASE);
746 unlock( lock );
747}
748
749static inline void unlock( block_spin_lock & this ) with(this) {
750 __atomic_store_n(&held, false, __ATOMIC_RELEASE);
751}
752
753static inline void on_notify( block_spin_lock & this, struct thread$ * t ) with(this.lock) {
754 // first we acquire internal fast_block_lock
755 lock( lock __cfaabi_dbg_ctx2 );
756 if ( held ) { // if internal fast_block_lock is held
757 insert_last( blocked_threads, *t );
758 unlock( lock );
759 return;
760 }
761 // if internal fast_block_lock is not held
762 held = true;
763 unlock( lock );
764
765 unpark(t);
766}
767static inline size_t on_wait( block_spin_lock & this, __cfa_pre_park pp_fn, void * pp_datum ) {
768 unlock( this );
769 pre_park_then_park( pp_fn, pp_datum );
770 return 0;
771}
772static inline void on_wakeup( block_spin_lock & this, size_t recursion ) with(this) {
773 // now we acquire the entire block_spin_lock upon waking up
774 while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
775 __atomic_store_n(&held, true, __ATOMIC_RELEASE);
776	unlock( lock ); // Now we release the internal fast_block_lock
777}
778
779//-----------------------------------------------------------------------------
780// info_thread
781// the info_thread is a wrapper around a thread used
782// to store extra data for use in the condition variable
783forall(L & | is_blocking_lock(L)) {
784 struct info_thread;
785}
786
787//-----------------------------------------------------------------------------
788// Synchronization Locks
789forall(L & | is_blocking_lock(L)) {
790
791 //-----------------------------------------------------------------------------
792 // condition_variable
793
794 // The multi-tool condition variable
795	// - waits may take a timeout, returning on either a signal or the timeout
796 // - can wait without passing a lock
797 // - can have waiters reacquire different locks while waiting on the same cond var
798 // - has shadow queue
799 // - can be signalled outside of critical sections with no locks held
800 struct condition_variable {
801 // Spin lock used for mutual exclusion
802 __spinlock_t lock;
803
804 // List of blocked threads
805 dlist( info_thread(L) ) blocked_threads;
806
807 // Count of current blocked threads
808 int count;
809 };
810
811
812 void ?{}( condition_variable(L) & this );
813 void ^?{}( condition_variable(L) & this );
814
815 bool notify_one( condition_variable(L) & this );
816 bool notify_all( condition_variable(L) & this );
817
818 uintptr_t front( condition_variable(L) & this );
819
820 bool empty ( condition_variable(L) & this );
821 int counter( condition_variable(L) & this );
822
823 void wait( condition_variable(L) & this );
824 void wait( condition_variable(L) & this, uintptr_t info );
825 bool wait( condition_variable(L) & this, Duration duration );
826 bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );
827
828 void wait( condition_variable(L) & this, L & l );
829 void wait( condition_variable(L) & this, L & l, uintptr_t info );
830 bool wait( condition_variable(L) & this, L & l, Duration duration );
831 bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
832
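	// Usage sketch (illustrative only, names hypothetical): a waiter and a
	// signaller sharing a single_acquisition_lock. wait( cond, lock ) atomically
	// blocks and releases the lock, then reacquires it before returning; the
	// Duration overloads additionally take a timeout and report the outcome
	// through their bool result.
	//
	//   single_acquisition_lock l;
	//   condition_variable( single_acquisition_lock ) c;
	//   volatile bool ready = false;
	//
	//   // waiter:
	//   lock( l );
	//   while ( ! ready ) wait( c, l );   // releases l while blocked, reacquires on wakeup
	//   unlock( l );
	//
	//   // signaller:
	//   lock( l ); ready = true; unlock( l );
	//   notify_one( c );                  // may be signalled with no locks held
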
833 //-----------------------------------------------------------------------------
834 // fast_cond_var
835
836 // The trimmed and slim condition variable
837	// - no internal lock, so a lock must be held while using this cond var
838	// - signalling without holding the branded lock is UNSAFE!
839	// - only one lock may be used; the cond var is branded to it after first use
840
841 struct fast_cond_var {
842 // List of blocked threads
843 dlist( info_thread(L) ) blocked_threads;
844 #ifdef __CFA_DEBUG__
845 L * lock_used;
846 #endif
847 };
848
849 void ?{}( fast_cond_var(L) & this );
850 void ^?{}( fast_cond_var(L) & this );
851
852 bool notify_one( fast_cond_var(L) & this );
853 bool notify_all( fast_cond_var(L) & this );
854
855 uintptr_t front( fast_cond_var(L) & this );
856 bool empty ( fast_cond_var(L) & this );
857
858 void wait( fast_cond_var(L) & this, L & l );
859 void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
860
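	// Usage sketch (illustrative only, names hypothetical): unlike
	// condition_variable above, fast_cond_var has no internal lock, so the
	// branded lock must be held around every wait and notify.
	//
	//   single_acquisition_lock l;
	//   fast_cond_var( single_acquisition_lock ) c;
	//
	//   lock( l );
	//   while ( ! ready ) wait( c, l );   // c becomes branded to l on first use
	//   unlock( l );
	//
	//   lock( l );
	//   ready = true;
	//   notify_one( c );                  // UNSAFE unless the branded lock l is held
	//   unlock( l );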
861
862 //-----------------------------------------------------------------------------
863 // pthread_cond_var
864 //
865 // - cond var with minimal footprint
866	// - supports operations needed for pthread cond
867
868 struct pthread_cond_var {
869 dlist( info_thread(L) ) blocked_threads;
870 __spinlock_t lock;
871 };
872
873 void ?{}( pthread_cond_var(L) & this );
874 void ^?{}( pthread_cond_var(L) & this );
875
876 bool notify_one( pthread_cond_var(L) & this );
877 bool notify_all( pthread_cond_var(L) & this );
878
879 uintptr_t front( pthread_cond_var(L) & this );
880 bool empty ( pthread_cond_var(L) & this );
881
882 void wait( pthread_cond_var(L) & this, L & l );
883 void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
884 bool wait( pthread_cond_var(L) & this, L & l, timespec t );
885 bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
886}