source: libcfa/src/concurrency/locks.hfa @ 3d4b7cc7

Last change on this file since 3d4b7cc7 was 0348fd8, checked in by caparsons <caparson@…>, 23 months ago

fixed clh bug where you couldn't hold more than 1 clh lock at once

//
// Cforall Version 1.0.0 Copyright (C) 2021 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// locks.hfa -- PUBLIC
// Runtime locks used with the runtime thread system.
//
// Author           : Colby Alexander Parsons
// Created On       : Thu Jan 21 19:46:50 2021
// Last Modified By :
// Last Modified On :
// Update Count     :
//

#pragma once

#include <stdbool.h>
#include <stdio.h>

#include "bits/weakso_locks.hfa"
#include "containers/lockfree.hfa"
#include "containers/list.hfa"

#include "limits.hfa"
#include "thread.hfa"

#include "time_t.hfa"
#include "time.hfa"

#include <fstream.hfa>


// futex headers
#include <linux/futex.h>      /* Definition of FUTEX_* constants */
#include <sys/syscall.h>      /* Definition of SYS_* constants */
#include <unistd.h>

// undef to make a number of the locks not reacquire upon waking from a cond var
#define REACQ 1

//-----------------------------------------------------------------------------
// Semaphore
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t(thread$) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
thread$ * V (semaphore & this, bool );

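// Usage sketch (illustrative only; `ready` and the two routines are
// hypothetical, not part of this header):
//	semaphore ready{ 0 };            // start with zero permits
//	// consumer: P( ready );         // blocks until a permit is available
//	// producer: V( ready );         // adds a permit, waking one waiter
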
//----------
struct single_acquisition_lock {
	inline blocking_lock;
};

static inline void  ?{}( single_acquisition_lock & this ) {((blocking_lock &)this){ false, false };}
static inline void ^?{}( single_acquisition_lock & this ) {}
static inline void   lock     ( single_acquisition_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( single_acquisition_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( single_acquisition_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//----------
struct owner_lock {
	inline blocking_lock;
};

static inline void  ?{}( owner_lock & this ) {((blocking_lock &)this){ true, true };}
static inline void ^?{}( owner_lock & this ) {}
static inline void   lock     ( owner_lock & this ) { lock    ( (blocking_lock &)this ); }
static inline bool   try_lock ( owner_lock & this ) { return try_lock( (blocking_lock &)this ); }
static inline void   unlock   ( owner_lock & this ) { unlock  ( (blocking_lock &)this ); }
static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }

//-----------------------------------------------------------------------------
// MCS Lock
struct mcs_node {
	mcs_node * volatile next;
	single_sem sem;
};

static inline void ?{}(mcs_node & this) { this.next = 0p; }

static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
	return node->next;
}

struct mcs_lock {
	mcs_queue(mcs_node) queue;
};

static inline void lock(mcs_lock & l, mcs_node & n) {
	if(push(l.queue, &n))
		wait(n.sem);
}

static inline void unlock(mcs_lock & l, mcs_node & n) {
	mcs_node * next = advance(l.queue, &n);
	if(next) post(next->sem);
}

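// Usage sketch (illustrative only): each acquisition supplies its own node,
// which must remain live until the matching unlock returns.
//	mcs_lock l;
//	mcs_node n;
//	lock( l, n );
//	// ... critical section ...
//	unlock( l, n );
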
//-----------------------------------------------------------------------------
// MCS Spin Lock
// - No recursive acquisition
// - Needs to be released by owner

struct mcs_spin_node {
	mcs_spin_node * volatile next;
	volatile bool locked;
};

struct mcs_spin_queue {
	mcs_spin_node * volatile tail;
};

static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }

static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
	return node->next;
}

struct mcs_spin_lock {
	mcs_spin_queue queue;
};

static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
	n.locked = true;
	if(prev == 0p) return;
	prev->next = &n;
	while(__atomic_load_n(&n.locked, __ATOMIC_RELAXED)) Pause();
}

static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
	mcs_spin_node * n_ptr = &n;
	if (__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) return;
	while (__atomic_load_n(&n.next, __ATOMIC_RELAXED) == 0p) {}
	n.next->locked = false;
}

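// Usage mirrors mcs_lock, but waiters spin on their own node's `locked` flag
// (cache-local spinning) instead of blocking on a semaphore:
//	mcs_spin_lock l;
//	mcs_spin_node n;
//	lock( l, n );
//	// ... critical section ...
//	unlock( l, n );
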
//-----------------------------------------------------------------------------
// futex_mutex

// - No cond var support
// - Kernel thd blocking alternative to the spinlock
// - No ownership (will deadlock on reacq)
struct futex_mutex {
	// lock state: any state other than UNLOCKED is locked
	// enum LockState { UNLOCKED = 0, UNCONTENDED = 1, CONTENDED = 2 };

	// stores a lock state
	int val;
};

// for use with FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
static inline int futex(int *uaddr, int futex_op, int val) {
	return syscall(SYS_futex, uaddr, futex_op, val, NULL, NULL, 0);
}

static inline void  ?{}( futex_mutex & this ) with(this) { val = 0; }

static inline bool internal_try_lock(futex_mutex & this, int & compare_val) with(this) {
	return __atomic_compare_exchange_n((int*)&val, (int*)&compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
}

static inline int internal_exchange(futex_mutex & this) with(this) {
	return __atomic_exchange_n((int*)&val, 2, __ATOMIC_ACQUIRE);
}

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(futex_mutex & this) with(this) {
	int state;

	// // linear backoff omitted for now
	// for( int spin = 4; spin < 1024; spin += spin) {
	//	state = 0;
	//	// if unlocked, lock and return
	//	if (internal_try_lock(this, state)) return;
	//	if (2 == state) break;
	//	for (int i = 0; i < spin; i++) Pause();
	// }

	// no contention, try to acquire
	if (internal_try_lock(this, state)) return;

	// if not in contended state, set to be in contended state
	if (state != 2) state = internal_exchange(this);

	// block and spin until we win the lock
	while (state != 0) {
		futex((int*)&val, FUTEX_WAIT, 2); // if val is not 2 this returns with EWOULDBLOCK
		state = internal_exchange(this);
	}
}

static inline void unlock(futex_mutex & this) with(this) {
	// if uncontended do atomic unlock and then return
	if (__atomic_fetch_sub(&val, 1, __ATOMIC_RELEASE) == 1) return; // TODO: try acq/rel

	// otherwise threads are blocked so we must wake one
	__atomic_store_n((int *)&val, 0, __ATOMIC_RELEASE);
	futex((int *)&val, FUTEX_WAKE, 1);
}

static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
static inline size_t on_wait( futex_mutex & f ) { unlock(f); return 0; }

// to set recursion count after getting signalled
static inline void on_wakeup( futex_mutex & f, size_t recursion ) {}

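// State walkthrough (sketch, using the enum above): an uncontended acquire
// moves 0 -> 1 via internal_try_lock, and unlock's fetch_sub moves 1 -> 0.
// Once a waiter appears the state is exchanged to 2, so the holder's
// fetch_sub observes 2, stores 0, and issues FUTEX_WAKE for one sleeper. A
// woken thread re-exchanges to 2 (conservatively assuming more waiters),
// which is why the state never returns to 1 while any thread may be sleeping.
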
//-----------------------------------------------------------------------------
// CLH Spinlock
// - No recursive acquisition
// - Needs to be released by owner

struct clh_lock {
	volatile bool * volatile tail;
	volatile bool * volatile head;
};

static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
static inline void ^?{}( clh_lock & this ) { free(this.tail); }

static inline void lock(clh_lock & l) {
	thread$ * curr_thd = active_thread();
	*(curr_thd->clh_node) = false;
	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
	while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
	curr_thd->clh_node = prev;
}

static inline void unlock(clh_lock & l) {
	__atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
}

static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(clh_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}

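// How the hand-off works (sketch): each thread publishes its per-thread
// clh_node (cleared to false) as the new tail, spins until its predecessor's
// node becomes true, records its own node as the lock's head (so unlock knows
// which flag to set), and adopts the predecessor's retired node as its next
// clh_node. Keeping the head in the lock rather than in the thread is the
// 0348fd8 fix that lets one thread hold more than one clh_lock at a time.
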
//-----------------------------------------------------------------------------
// Linear Backoff Then Block Lock
struct linear_backoff_then_block_lock {
	// Spin lock used for mutual exclusion
	__spinlock_t spinlock;

	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Used for comparing and exchanging
	volatile size_t lock_value;
};

static inline void  ?{}( linear_backoff_then_block_lock & this ) {
	this.spinlock{};
	this.blocked_threads{};
	this.lock_value = 0;
}
static inline void ^?{}( linear_backoff_then_block_lock & this ) {}
// static inline void ?{}( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;
// static inline void ?=?( linear_backoff_then_block_lock & this, linear_backoff_then_block_lock this2 ) = void;

static inline bool internal_try_lock(linear_backoff_then_block_lock & this, size_t & compare_val) with(this) {
	if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		return true;
	}
	return false;
}

static inline bool try_lock(linear_backoff_then_block_lock & this) { size_t compare_val = 0; return internal_try_lock(this, compare_val); }

static inline bool try_lock_contention(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
		return true;
	}
	return false;
}

static inline bool block(linear_backoff_then_block_lock & this) with(this) {
	lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
	if (lock_value != 2) {
		unlock( spinlock );
		return true;
	}
	insert_last( blocked_threads, *active_thread() );
	unlock( spinlock );
	park( );
	return true;
}

static inline void lock(linear_backoff_then_block_lock & this) with(this) {
	size_t compare_val = 0;
	int spin = 4;
	// linear backoff
	for( ;; ) {
		compare_val = 0;
		if (internal_try_lock(this, compare_val)) return;
		if (2 == compare_val) break;
		for (int i = 0; i < spin; i++) Pause();
		if (spin >= 1024) break;
		spin += spin;
	}

	if(2 != compare_val && try_lock_contention(this)) return;
	// block until signalled
	while (block(this)) if(try_lock_contention(this)) return;
}

static inline void unlock(linear_backoff_then_block_lock & this) with(this) {
	if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
	lock( spinlock __cfaabi_dbg_ctx2 );
	thread$ * t = &try_pop_front( blocked_threads );
	unlock( spinlock );
	unpark( t );
}

static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}

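// Acquisition path (sketch): spin with a growing pause count (4, 8, ..., up
// to 1024); if the lock is observed in the contended state (2) or the spin
// budget is exhausted, fall through to try_lock_contention, which both
// attempts the acquire and marks the lock contended so unlock knows to wake
// a blocked thread.
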
//-----------------------------------------------------------------------------
// Fast Block Lock

// minimal blocking lock
// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
struct fast_block_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// flag showing if lock is held
	bool held:1;
};

static inline void  ?{}( fast_block_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	held = false;
}
static inline void ^?{}( fast_block_lock & this ) {}
static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	held = true;
	unlock( lock );
}

static inline void unlock(fast_block_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
	thread$ * t = &try_pop_front( blocked_threads );
	held = ( t ? true : false );
	unpark( t );
	unlock( lock );
}

static inline void on_notify(fast_block_lock & this, struct thread$ * t ) with(this) {
	#ifdef REACQ
		// under REACQ a notified thread must reacquire this lock,
		// so enqueue it rather than unparking it directly
		lock( lock __cfaabi_dbg_ctx2 );
		insert_last( blocked_threads, *t );
		unlock( lock );
	#else
		unpark(t);
	#endif
}
static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }

//-----------------------------------------------------------------------------
// simple_owner_lock

// pthread owner lock
// - reacquire for cond var
// - recursive acquisition
// - ownership
struct simple_owner_lock {
	// List of blocked threads
	dlist( thread$ ) blocked_threads;

	// Spin lock used for mutual exclusion
	__spinlock_t lock;

	// owner showing if lock is held
	struct thread$ * owner;

	size_t recursion_count;
};

static inline void  ?{}( simple_owner_lock & this ) with(this) {
	lock{};
	blocked_threads{};
	owner = 0p;
	recursion_count = 0;
}
static inline void ^?{}( simple_owner_lock & this ) {}
static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;

static inline void lock(simple_owner_lock & this) with(this) {
	if (owner == active_thread()) {
		recursion_count++;
		return;
	}
	lock( lock __cfaabi_dbg_ctx2 );

	if (owner != 0p) {
		insert_last( blocked_threads, *active_thread() );
		unlock( lock );
		park( );
		return;
	}
	owner = active_thread();
	recursion_count = 1;
	unlock( lock );
}

// TODO: fix duplicate def issue and bring this back
// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
//	thread$ * t = &try_pop_front( blocked_threads );
//	owner = t;
//	recursion_count = ( t ? 1 : 0 );
//	unpark( t );
// }

static inline void unlock(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
	// if the recursion count drops to zero, release the lock and set a new owner if one is waiting
	recursion_count--;
	if ( recursion_count == 0 ) {
		// pop_and_set_new_owner( this );
		thread$ * t = &try_pop_front( blocked_threads );
		owner = t;
		recursion_count = ( t ? 1 : 0 );
		unpark( t );
	}
	unlock( lock );
}

static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	// lock held
	if ( owner != 0p ) {
		insert_last( blocked_threads, *t );
	}
	// lock not held
	else {
		owner = t;
		recursion_count = 1;
		unpark( t );
	}
	unlock( lock );
}

static inline size_t on_wait(simple_owner_lock & this) with(this) {
	lock( lock __cfaabi_dbg_ctx2 );
	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );

	size_t ret = recursion_count;

	// pop_and_set_new_owner( this );

	thread$ * t = &try_pop_front( blocked_threads );
	owner = t;
	recursion_count = ( t ? 1 : 0 );
	unpark( t );

	unlock( lock );
	return ret;
}

static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }

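// Usage sketch (illustrative only): the owner may nest acquisitions; the cond
// var hooks save the recursion depth in on_wait and restore it in on_wakeup.
//	simple_owner_lock l;
//	lock( l );
//	lock( l );     // owner reacquires, recursion_count == 2
//	unlock( l );   // still held, recursion_count == 1
//	unlock( l );   // released; next waiter (if any) becomes owner
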
//-----------------------------------------------------------------------------
// Spin Queue Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - spin lock with no locking/atomics in unlock
struct spin_queue_lock {
	// Spin lock used for mutual exclusion
	mcs_spin_lock lock;

	// flag showing if lock is held
	volatile bool held;

	#ifdef __CFA_DEBUG__
	// for deadlock detection
	struct thread$ * owner;
	#endif
};

static inline void  ?{}( spin_queue_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( spin_queue_lock & this ) {}
static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(spin_queue_lock & this) with(this) {
	mcs_spin_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(spin_queue_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}
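
// Design note (sketch): the internal MCS queue only orders waiters; the hold
// state is the separate `held` flag, so unlock above is a single release
// store with no atomic RMW or queue handling.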

static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) {
	unpark(t);
}
static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}

//-----------------------------------------------------------------------------
// MCS Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but threads other than the first block)
struct mcs_block_spin_lock {
	// Spin lock used for mutual exclusion
	mcs_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( mcs_block_spin_lock & this ) {}
static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(mcs_block_spin_lock & this) with(this) {
	mcs_node node;
	lock( lock, node );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_SEQ_CST);
	unlock( lock, node );
}

static inline void unlock(mcs_block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_SEQ_CST);
}

static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) {
	#ifdef REACQ
	lock(this);
	#endif
}

//-----------------------------------------------------------------------------
// Block Spin Lock

// - No reacquire for cond var
// - No recursive acquisition
// - No ownership
// - Blocks, but the first node spins (like spin queue, but threads other than the first block)
struct block_spin_lock {
	// Spin lock used for mutual exclusion
	fast_block_lock lock;

	// flag showing if lock is held
	volatile bool held;
};

static inline void  ?{}( block_spin_lock & this ) with(this) {
	lock{};
	held = false;
}
static inline void ^?{}( block_spin_lock & this ) {}
static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;

// if this is called recursively IT WILL DEADLOCK!!!!!
static inline void lock(block_spin_lock & this) with(this) {
	lock( lock );
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock );
}

static inline void unlock(block_spin_lock & this) with(this) {
	__atomic_store_n(&held, false, __ATOMIC_RELEASE);
}

static inline void on_notify(block_spin_lock & this, struct thread$ * t ) with(this.lock) {
	#ifdef REACQ
	// first we acquire the internal fast_block_lock on the woken thread's behalf
	lock( lock __cfaabi_dbg_ctx2 );
	if ( held ) { // if internal fast_block_lock is held
		insert_last( blocked_threads, *t );
		unlock( lock );
		return;
	}
	// if internal fast_block_lock is not held
	held = true;
	unlock( lock );

	#endif
	unpark(t);
}
static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
static inline void on_wakeup(block_spin_lock & this, size_t recursion ) with(this) {
	#ifdef REACQ
	// now we acquire the entire block_spin_lock upon waking up
	while(__atomic_load_n(&held, __ATOMIC_SEQ_CST)) Pause();
	__atomic_store_n(&held, true, __ATOMIC_RELEASE);
	unlock( lock ); // now we release the internal fast_block_lock
	#endif
}

//-----------------------------------------------------------------------------
// is_blocking_lock
trait is_blocking_lock(L & | sized(L)) {
	// For synchronization locks to use when acquiring
	void on_notify( L &, struct thread$ * );

	// For synchronization locks to use when releasing
	size_t on_wait( L & );

	// to set recursion count after getting signalled
	void on_wakeup( L &, size_t recursion );
};
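
// Protocol sketch: a cond var calls on_wait(l) as the waiter parks (releasing
// l and capturing its recursion count), on_notify(l, t) when t is signalled
// (either unparking t directly or queueing it to reacquire l), and
// on_wakeup(l, count) once t resumes, restoring the saved recursion count.
// Any lock above defining these three routines satisfies the trait.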

//-----------------------------------------------------------------------------
// // info_thread
// // the info thread is a wrapper around a thread used
// // to store extra data for use in the condition variable
forall(L & | is_blocking_lock(L)) {
	struct info_thread;

	// // for use by sequence
	// info_thread(L) *& Back( info_thread(L) * this );
	// info_thread(L) *& Next( info_thread(L) * this );
}

//-----------------------------------------------------------------------------
// Synchronization Locks
forall(L & | is_blocking_lock(L)) {

	//-----------------------------------------------------------------------------
	// condition_variable

	// The multi-tool condition variable
	// - can pass timeouts to wait for either a signal or timeout
	// - can wait without passing a lock
	// - can have waiters reacquire different locks while waiting on the same cond var
	// - has shadow queue
	// - can be signalled outside of critical sections with no locks held
	struct condition_variable {
		// Spin lock used for mutual exclusion
		__spinlock_t lock;

		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;

		// Count of current blocked threads
		int count;
	};

	void  ?{}( condition_variable(L) & this );
	void ^?{}( condition_variable(L) & this );

	bool notify_one( condition_variable(L) & this );
	bool notify_all( condition_variable(L) & this );

	uintptr_t front( condition_variable(L) & this );

	bool empty  ( condition_variable(L) & this );
	int  counter( condition_variable(L) & this );

	void wait( condition_variable(L) & this );
	void wait( condition_variable(L) & this, uintptr_t info );
	bool wait( condition_variable(L) & this, Duration duration );
	bool wait( condition_variable(L) & this, uintptr_t info, Duration duration );

	void wait( condition_variable(L) & this, L & l );
	void wait( condition_variable(L) & this, L & l, uintptr_t info );
	bool wait( condition_variable(L) & this, L & l, Duration duration );
	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
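
	// Usage sketch (illustrative only; the names `l` and `cv` are hypothetical):
	//	owner_lock l;
	//	condition_variable( owner_lock ) cv;
	//	// waiter:    lock( l ); wait( cv, l ); /* lock reacquired */ unlock( l );
	//	// signaller: notify_one( cv );  // legal with or without holding l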

	//-----------------------------------------------------------------------------
	// fast_cond_var

	// The trimmed and slim condition variable
	// - no internal lock, so you must hold a lock while using this cond var
	// - signalling without holding the branded lock is UNSAFE!
	// - only allows usage of one lock; the cond var is branded by its first use

	struct fast_cond_var {
		// List of blocked threads
		dlist( info_thread(L) ) blocked_threads;
		#ifdef __CFA_DEBUG__
		L * lock_used;
		#endif
	};

	void  ?{}( fast_cond_var(L) & this );
	void ^?{}( fast_cond_var(L) & this );

	bool notify_one( fast_cond_var(L) & this );
	bool notify_all( fast_cond_var(L) & this );

	uintptr_t front( fast_cond_var(L) & this );
	bool empty  ( fast_cond_var(L) & this );

	void wait( fast_cond_var(L) & this, L & l );
	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );


	//-----------------------------------------------------------------------------
	// pthread_cond_var
	//
	// - cond var with minimal footprint
	// - supports operations needed for pthread cond

	struct pthread_cond_var {
		dlist( info_thread(L) ) blocked_threads;
		__spinlock_t lock;
	};

	void  ?{}( pthread_cond_var(L) & this );
	void ^?{}( pthread_cond_var(L) & this );

	bool notify_one( pthread_cond_var(L) & this );
	bool notify_all( pthread_cond_var(L) & this );

	uintptr_t front( pthread_cond_var(L) & this );
	bool empty ( pthread_cond_var(L) & this );

	void wait( pthread_cond_var(L) & this, L & l );
	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
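
	// Timed-wait sketch (illustrative; interpreting the bool as "signalled
	// before the timeout" is an assumption, the header only fixes its type):
	//	timespec abs_deadline = { /* absolute time */ };
	//	bool signalled = wait( pcv, l, abs_deadline );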
}