source: libcfa/src/concurrency/locks.hfa @ 331ee52c
//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks that are used with the runtime thread system.
//
// Author : Colby Alexander Parsons
// Created On : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include <fstream.hfa>


// futex headers
#include <linux/futex.h>	/* Definition of FUTEX_* constants */
#include <sys/syscall.h>	/* Definition of SYS_* constants */
#include <unistd.h>

// undefine to prevent a number of the locks from reacquiring upon waking from a condition variable
#define REACQ 1

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );
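
// Illustrative usage sketch (comments only, not part of the interface): a
// semaphore initialized to 0 used for one-shot signalling between threads.
// `Waiter` is a hypothetical thread type introduced only for this example.
//
//   semaphore s{ 0 };                // start with no permits
//
//   thread Waiter {};
//   void main( Waiter & ) {
//       P( s );                      // block until a permit is released
//   }
//
//   // elsewhere: V( s );            // release one permit, waking one waiter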

//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void lock ( single_acquisition_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( single_acquisition_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void lock ( owner_lock & this ) { lock ( (blocking_lock &)this ); }
static inline bool try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void unlock ( owner_lock & this ) { unlock ( (blocking_lock &)this ); }
static inline size_t on_wait ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
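
// Illustrative contrast (sketch): both wrappers delegate to blocking_lock and
// differ only in the two constructor flags, which appear to select
// blocking_lock's multi-acquisition and strict-ownership behaviour
// (see bits/weakso_locks.hfa).
//
//   single_acquisition_lock s;
//   lock( s ); lock( s );            // second lock() blocks (self-deadlock): no recursive acquisition
//
//   owner_lock o;
//   lock( o ); lock( o );            // ok: recursive acquisition by the owner
//   unlock( o ); unlock( o );        // must be released once per acquisition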

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}
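
// Illustrative usage sketch: each acquisition supplies its own queue node,
// which must remain alive until the matching unlock.
//
//   mcs_lock l;
//   void critical() {
//       mcs_node n;                  // per-acquisition node, typically on the stack
//       lock( l, n );                // enqueue; sleep on n.sem unless first in queue
//       // ... critical section ...
//       unlock( l, n );              // hand off by posting the successor's semaphore
//   }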

//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}
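
// Illustrative handoff trace (sketch) for two threads T1 and T2:
//   T1 lock:   exchange tail (prev == 0p)              -> T1 holds the lock
//   T2 lock:   exchange tail (prev == T1's node),
//              link prev->next = &n, spin on n.locked
//   T1 unlock: CAS on tail fails (T2 is queued), wait for n.next,
//              clear T2's locked flag                  -> T2 enters the critical section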

//-----------------------------------------------------------------------------
// futex_mutex

// - No cond var support
// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
	return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
	int state = 0; // must start at UNLOCKED so the first try-lock compares against 0

	// // linear backoff omitted for now
	// for( int spin = 4; spin < 1024; spin += spin) {
	// 	state = 0;
	// 	// if unlocked, lock and return
	// 	if (internal_try_lock(this, state)) return;
	// 	if (2 == state) break;
	// 	for (int i = 0; i < spin; i++) Pause();
	// }

	// no contention, try to acquire
	if (internal_try_lock(this, state)) return;

	// if not in contended state, set to be in contended state
	if (state != 2) state = internal_exchange(this);

	// block and spin until we win the lock
	while (state != 0) {
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange(this);
	}
}

static inline void unlock(futex_mutex & this) with(this) {
	// if uncontended do an atomic unlock and then return
	if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel

	// otherwise threads are blocked so we must wake one
	__atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
	futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) {unlock(f); return 0;}

// to set the recursion count after being signalled
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}
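
// Illustrative state walkthrough (sketch) of the 0/1/2 protocol above, in the
// style of the classic "Futexes Are Tricky" mutex:
//
//   futex_mutex m;          // val == 0 (UNLOCKED)
//   lock( m );              // CAS 0 -> 1: uncontended fast path, no syscall
//   // a second thread calling lock( m ) fails the CAS, exchanges val to 2
//   // (CONTENDED) and calls futex FUTEX_WAIT while val is still 2
//   unlock( m );            // fetch_sub sees 2 != 1: store 0, FUTEX_WAKE one waiter
//   // the woken thread re-exchanges val to 2; seeing the old value 0, it owns the lock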

//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
	volatile bool * volatile tail;
};

static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
	curr_thd->clh_prev = prev;
}

static inline void unlock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
	curr_thd->clh_node = curr_thd->clh_prev;
}

static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(clh_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}
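
// Illustrative note (sketch): this is a standard CLH queue lock. Each thread$
// carries its own clh_node; lock() publishes that node (false == busy) and
// spins on the predecessor's node, while unlock() recycles the predecessor's
// node, so the hot path performs no allocation.
//
//   clh_lock l;
//   lock( l );              // exchange tail, spin until the predecessor's node is true
//   // ... critical section ...
//   unlock( l );            // set own node true, adopt the predecessor's node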

//-----------------------------------------------------------------------------
// Linear backoff then block lock
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void ?{}( linear_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
	size_t compare_val = 0;
	int spin = 4;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= 1024) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}
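
// Illustrative behaviour sketch: despite the "linear" name, the spin budget
// doubles each round (4, 8, 16, ... up to 1024 pauses); once contention
// (lock_value == 2) is observed or the budget is exhausted, the caller blocks
// on the internal queue instead of spinning further.
//
//   linear_backoff_then_block_lock l;
//   if ( try_lock( l ) ) {  // single CAS, no spinning
//       // ... critical section ...
//       unlock( l );
//   }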

//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
	#ifdef REACQ
	lock( lock __cfaabi_dbg_ctx2 );
	insert_last( blocked_threads, *t );
	unlock( lock );
	#else
	unpark(t);
	#endif
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
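
// Illustrative note (sketch): with REACQ defined, on_notify does not unpark
// the signalled thread directly; it re-queues the thread on the lock, so the
// thread resumes only when a later unlock() hands it the lock in FIFO order.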

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner thread; 0p when the lock is not held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
// 	thread$ * t = &try_pop_front( blocked_threads );
// 	owner = t;
// 	recursion_count = ( t ? 1 : 0 );
// 	unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if the recursion count reaches zero, release the lock and set a new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );

	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
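
// Illustrative usage sketch: recursive acquisition by the owner; the
// condition-variable handlers above save the recursion count in on_wait and
// restore it in on_wakeup, so a wait() releases the lock fully and reacquires
// it at the same depth.
//
//   simple_owner_lock l;
//   lock( l ); lock( l );            // recursion_count == 2
//   unlock( l ); unlock( l );        // lock released when the count reaches 0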

//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
	unpark(t);
}
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but blocking for non-first threads)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but blocking for non-first threads)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
	#ifdef REACQ
	// first acquire the internal fast_block_lock
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if the internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// the internal fast_block_lock is not held
	held = true;
	unlock( lock );
	#endif
	unpark(t);
}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
	#ifdef REACQ
	// now acquire the entire block_spin_lock upon waking up
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock ); // now release the internal fast_block_lock
	#endif
}
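
// Illustrative note (sketch): with REACQ defined, reacquisition after a
// condition-variable signal is split across the two handlers above: on_notify
// acquires (or queues on) the internal fast_block_lock on the woken thread's
// behalf, and on_wakeup then spins for the held flag and releases the internal
// lock, completing the same two-step acquire that lock() performs.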

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set the recursion count after being signalled
	void on_wakeup( L &, size_t recursion );
};
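
// Illustrative note (sketch): any lock above defining all three handlers
// satisfies this trait and may parameterize the condition variables below,
// e.g.
//
//   condition_variable( simple_owner_lock ) cv;
//   condition_variable( fast_block_lock ) cv2;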

//-----------------------------------------------------------------------------
// info_thread
// the info_thread is a wrapper around a thread used
// to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// // for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty ( condition_variable(L) & this );
	int counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
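
	// Illustrative usage sketch (comments only):
	//
	//   single_acquisition_lock l;
	//   condition_variable( single_acquisition_lock ) cv;
	//
	//   // waiter:
	//   lock( l );
	//   wait( cv, l );               // releases l while blocked, reacquires before returning
	//   unlock( l );
	//
	//   // signaller (no lock required):
	//   notify_one( cv );
	//
	//   // timed variant; the bool result distinguishes signal from timeout:
	//   bool was_signalled = wait( cv, l, 1`s );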

	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock, so you must hold a lock while using this cond var
	// - signalling without holding the branded lock is UNSAFE!
	// - only allows usage of one lock; the cond var is branded after usage

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
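
	// Illustrative note (sketch): the cond var is branded by its first use
	// (checked via lock_used in debug builds); every later wait/notify must be
	// performed while holding that same lock.
	//
	//   owner_lock l;
	//   fast_cond_var( owner_lock ) cv;
	//   lock( l );
	//   wait( cv, l );               // cv is now branded to l
	//   notify_one( cv );            // caller must hold l
	//   unlock( l );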

	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
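
	// Illustrative usage sketch of the timed wait; the timespec is treated
	// here as an absolute deadline, pthread_cond_timedwait-style (the deadline
	// construction below is hypothetical):
	//
	//   owner_lock l;
	//   pthread_cond_var( owner_lock ) cv;
	//   lock( l );
	//   timespec deadline = { time( 0p ) + 1, 0 };   // ~1 second from now
	//   bool was_signalled = wait( cv, l, deadline );
	//   unlock( l );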
}