source: libcfa/src/concurrency/locks.hfa@ e2f827f

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include <fstream.hfa>

// futex headers
#include <linux/futex.h>   /* Definition of FUTEX_* constants */
#include <sys/syscall.h>   /* Definition of SYS_* constants */
#include <unistd.h>

// C_TODO: cleanup this and locks.cfa
// - appropriate separation of interface and impl
// - clean up unused/unneeded locks
// - change messy big blocking lock from inheritance to composition to remove need for flags
//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

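// Usage sketch (illustrative only, not part of this header; the function and
// variable names are hypothetical): a counting semaphore limiting how many
// threads may use a shared resource at once. P() blocks the caller when the
// count is exhausted; V() releases a slot and wakes one waiter if any.
//
//   semaphore slots = { 4 };            // at most 4 concurrent users
//
//   void use_resource() {
//       P( slots );                     // acquire a slot (may block when count is 0)
//       // ... use the shared resource ...
//       V( slots );                     // release the slot, waking a waiter if any
//   }
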
//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}

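// Usage sketch (illustrative only, not part of this header; critical() is a
// hypothetical caller): the caller supplies a queue node whose lifetime must
// cover the whole critical section, and the same node is passed to unlock so
// the lock can hand off to the next queued thread.
//
//   mcs_lock l;
//
//   void critical() {
//       mcs_node n;                     // one node per acquisition, typically on the stack
//       lock( l, n );
//       // ... critical section ...
//       unlock( l, n );
//   }
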
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}

//-----------------------------------------------------------------------------
// futex_mutex

// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
	return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
	int state;

	for( int spin = 4; spin < 1024; spin += spin) {
		state = 0;
		// if unlocked, lock and return
		if (internal_try_lock(this, state)) return;
		if (2 == state) break;
		for (int i = 0; i < spin; i++) Pause();
	}

	// // no contention try to acquire
	// if (internal_try_lock(this, state)) return;

	// if not in contended state, set to be in contended state
	if (state != 2) state = internal_exchange(this);

	// block and spin until we win the lock
	while (state != 0) {
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange(this);
	}
}

static inline void unlock(futex_mutex & this) with(this) {
	// if uncontended do atomic unlock and then return
	if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

	// otherwise threads are blocked so we must wake one
	futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}

// to set recursion count after getting signalled
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}

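// Usage sketch (illustrative only, not part of this header; increment() and
// counter are hypothetical). futex_mutex keeps no owner, so a thread that
// already holds it and calls lock() again deadlocks. val moves between
// 0 (unlocked), 1 (locked, uncontended) and 2 (locked, possibly contended);
// unlock only issues FUTEX_WAKE when the old value was not 1, i.e. when
// waiters may be sleeping.
//
//   futex_mutex m;
//   int counter = 0;
//
//   void increment() {
//       lock( m );
//       counter += 1;                   // critical section
//       unlock( m );
//   }
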
//-----------------------------------------------------------------------------
// go_mutex

// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacquisition)
// - Golang's flavour of mutex
// - Impl taken from Golang: src/runtime/lock_futex.go
struct go_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, LOCKED = 1, SLEEPING = 2 };

	// stores a lock state
	int val;
};

static inline void ?{}( go_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(go_mutex & this, int & compare_val, int new_val ) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, new_val, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(go_mutex & this, int swap ) with(this) {
	return __atomic_exchange_n((int*)&val, swap, __ATOMIC_ACQUIRE);
}

const int __go_mtx_spins = 4;
const int __go_mtx_pauses = 30;
// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(go_mutex & this) with(this) {
	int state, init_state;

	// speculative grab
	state = internal_exchange(this, 1);
	if ( !state ) return; // state == 0
	init_state = state;
	for (;;) {
		for( int i = 0; i < __go_mtx_spins; i++ ) {
			while( !val ) { // lock unlocked
				state = 0;
				if (internal_try_lock(this, state, init_state)) return;
			}
			for (int i = 0; i < __go_mtx_pauses; i++) Pause();
		}

		while( !val ) { // lock unlocked
			state = 0;
			if (internal_try_lock(this, state, init_state)) return;
		}
		sched_yield();

		// if not in contended state, set to be in contended state
		state = internal_exchange(this, 2);
		if ( !state ) return; // state == 0
		init_state = 2;
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
	}
}

static inline void unlock( go_mutex & this ) with(this) {
	// if uncontended do atomic unlock and then return
	if (__atomic_exchange_n(&val, 0, __ATOMIC_RELEASE) == 1) return;

	// otherwise threads are blocked so we must wake one
	futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( go_mutex & f ) {unlock(f); return 0;}
static inline void on_wakeup( go_mutex & f, size_t recursion ) {}

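// Acquisition phases of the Go-style lock above (summary of the code, for
// reference): (1) a speculative exchange grabs an uncontended lock, then the
// loop alternates (2) bounded spinning with Pause(), (3) a sched_yield() to
// let another kernel thread run, and (4) marking the lock contended (val = 2)
// and sleeping in FUTEX_WAIT until woken, after which it spins again.
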
//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
	volatile bool * volatile tail;
	volatile bool * volatile head;
};

static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
	curr_thd->clh_node = prev;
}

static inline void unlock(clh_lock & l) {
	__atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
}

static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }

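// Note on node recycling in the CLH lock above: after acquiring, a thread
// publishes its own clh_node as the lock's head and then adopts its
// predecessor's node as its clh_node for the next acquisition, so one node
// per thread (plus the lock's initial node) circulates without allocation.
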
//-----------------------------------------------------------------------------
// Exponential backoff then block lock
struct exp_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

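// lock_value encodes the lock state as used by the routines below:
// 0 = unlocked, 1 = locked and uncontended, 2 = locked with possible waiters.
// unlock only takes the slow path (pop and unpark a waiter) when the previous
// value was not 1.
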
static inline void ?{}( exp_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}

static inline void ^?{}( exp_backoff_then_block_lock & this ){}

static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

static inline bool try_lock(exp_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
	return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
}

static inline bool block(exp_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 );
	if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(exp_backoff_then_block_lock & this) with(this) {
	size_t compare_val = 0;
	int spin = 4;

	// exponential backoff: the spin count doubles each round, up to 1024
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= 1024) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(exp_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(exp_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(exp_backoff_then_block_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

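// Note on the hooks above: on_notify enqueues the signalled thread on
// blocked_threads instead of unparking it, so the thread receives the lock
// when a later unlock pops the queue; on_wakeup is a no-op.
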
//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
	// thread$ * t = &try_pop_front( blocked_threads );
	// owner = t;
	// recursion_count = ( t ? 1 : 0 );
	// unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if recursion count is zero release lock and set new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );

	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

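// Usage sketch (illustrative only, not part of this header; outer() and
// inner() are hypothetical): the lock records its owner, so the owning thread
// may acquire it recursively and must unlock once per lock.
//
//   simple_owner_lock l;
//
//   void inner() {
//       lock( l );                      // recursion_count becomes 2
//       // ...
//       unlock( l );
//   }
//   void outer() {
//       lock( l );                      // recursion_count becomes 1
//       inner();
//       unlock( l );                    // fully released here
//   }
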
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
	unpark(t);
}
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { lock(this); }


//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { lock(this); }

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin_queue_lock, except threads other than the first block instead of spinning)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
	// first we acquire internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// if internal fast_block_lock is not held
	held = true;
	unlock( lock );

	unpark(t);
}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
	// now we acquire the entire block_spin_lock upon waking up
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock ); // now we release the internal fast_block_lock
}

//-----------------------------------------------------------------------------
// is_blocking_lock
forall( L & | sized(L) )
trait is_blocking_lock {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};

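// Note: every lock above that defines on_notify, on_wait, and on_wakeup
// satisfies this trait, which is what allows it to be used as the L parameter
// of the condition variables declared below.
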
//-----------------------------------------------------------------------------
// info_thread
// the info thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// // for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};


	void ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );

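	// Usage sketch (illustrative only, not part of this header; consumer(),
	// producer() and ready are hypothetical): waiting on a condition with an
	// owner lock. wait( cv, l ) releases l while the thread is blocked and
	// reacquires it before returning.
	//
	//   simple_owner_lock l;
	//   condition_variable( simple_owner_lock ) cv;
	//   bool ready = false;
	//
	//   void consumer() {
	//       lock( l );
	//       while ( ! ready ) wait( cv, l );
	//       // ... consume ...
	//       unlock( l );
	//   }
	//   void producer() {
	//       lock( l );
	//       ready = true;
	//       notify_one( cv );            // may also be called without holding l
	//       unlock( l );
	//   }
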
	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock so you must hold a lock while using this cond var
	// - signalling without holding branded lock is UNSAFE!
	// - only allows usage of one lock, cond var is branded after usage

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
}