Changes in / [fc134a48:e5d9274]


Files changed: 10 added, 4 edited

Legend: Unmodified | Added | Removed
  • libcfa/src/concurrency/invoke.h

    rfc134a48 re5d9274  
    195195                struct __monitor_group_t monitors;
    196196
    197                 // used to put threads on user data structures
    198                 struct {
    199                         struct thread$ * next;
    200                         struct thread$ * back;
    201                 } seqable;
    202 
    203197                // used to put threads on dlist data structure
    204198                __cfa_dlink(thread$);
     
    208202                        struct thread$ * prev;
    209203                } node;
     204
     205                // used to store state between clh lock/unlock
     206                volatile bool * clh_prev;
     207
     208                // used to point to this thd's current clh node
     209                volatile bool * clh_node;
    210210
    211211                struct processor * last_proc;
     
    240240                }
    241241
    242                 static inline thread$ * volatile & ?`next ( thread$ * this )  __attribute__((const)) {
    243                         return this->seqable.next;
    244                 }
    245 
    246                 static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
    247                         return this->seqable.back;
    248                 }
    249 
    250                 static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
    251                                 return this->seqable.next;
    252                 }
    253 
    254                 static inline bool listed( thread$ * this ) {
    255                         return this->seqable.next != 0p;
    256                 }
    257 
    258242                static inline void ?{}(__monitor_group_t & this) {
    259243                        (this.data){0p};
  • libcfa/src/concurrency/locks.cfa

    rfc134a48 re5d9274  
    176176        recursion_count = recursion;
    177177}
     178
     179//-----------------------------------------------------------------------------
     180// simple_owner_lock
     181
     182static inline void lock(simple_owner_lock & this) with(this) {
     183        if (owner == active_thread()) {
     184                recursion_count++;
     185                return;
     186        }
     187        lock( lock __cfaabi_dbg_ctx2 );
     188
     189        if (owner != 0p) {
     190                insert_last( blocked_threads, *active_thread() );
     191                unlock( lock );
     192                park( );
     193                return;
     194        }
     195        owner = active_thread();
     196        recursion_count = 1;
     197        unlock( lock );
     198}
     199
     200void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
     201        thread$ * t = &try_pop_front( blocked_threads );
     202        owner = t;
     203        recursion_count = ( t ? 1 : 0 );
     204        unpark( t );
     205}
     206
     207static inline void unlock(simple_owner_lock & this) with(this) {
     208        lock( lock __cfaabi_dbg_ctx2 );
     209        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     210        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
     211        // if recursion count is zero release lock and set new owner if one is waiting
     212        recursion_count--;
     213        if ( recursion_count == 0 ) {
     214                pop_and_set_new_owner( this );
     215        }
     216        unlock( lock );
     217}
     218
     219static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
     220        lock( lock __cfaabi_dbg_ctx2 );
     221        // lock held
     222        if ( owner != 0p ) {
     223                insert_last( blocked_threads, *t );
     224                unlock( lock );
     225        }
     226        // lock not held
     227        else {
     228                owner = t;
     229                recursion_count = 1;
     230                unpark( t );
     231                unlock( lock );
     232        }
     233}
     234
     235static inline size_t on_wait(simple_owner_lock & this) with(this) {
     236        lock( lock __cfaabi_dbg_ctx2 );
     237        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     238        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
     239
     240        size_t ret = recursion_count;
     241
     242        pop_and_set_new_owner( this );
     243
     244        unlock( lock );
     245        return ret;
     246}
     247
     248static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
     249
    178250
    179251//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/locks.hfa

    rfc134a48 re5d9274  
    101101
    102102//-----------------------------------------------------------------------------
     103// MCS Spin Lock
     104// - No recursive acquisition
     105// - Needs to be released by owner
     106
     107struct mcs_spin_node {
     108        mcs_spin_node * volatile next;
     109        bool locked:1;
     110};
     111
     112struct mcs_spin_queue {
     113        mcs_spin_node * volatile tail;
     114};
     115
     116static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
     117
     118static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
     119        return node->next;
     120}
     121
     122struct mcs_spin_lock {
     123        mcs_spin_queue queue;
     124};
     125
     126static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
     127        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
     128        if(prev != 0p) {
     129                prev->next = &n;
     130                while(n.locked) Pause();
     131        }
     132}
     133
     134static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
     135        mcs_spin_node * n_ptr = &n;
     136        if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     137                while (n.next == 0p) {}
     138                n.next->locked = false;
     139        }
     140}
     141
     142//-----------------------------------------------------------------------------
     143// CLH Spinlock
     144// - No recursive acquisition
     145// - Needs to be released by owner
     146
     147struct clh_lock {
     148        volatile bool * volatile tail;
     149};
     150
     151static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
     152static inline void ^?{}( clh_lock & this ) { free(this.tail); }
     153
     154static inline void lock(clh_lock & l) {
     155        thread$ * curr_thd = active_thread();
     156        *(curr_thd->clh_node) = false;
     157        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
     158        while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
     159        curr_thd->clh_prev = prev;
     160}
     161
     162static inline void unlock(clh_lock & l) {
     163        thread$ * curr_thd = active_thread();
     164        __atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
     165        curr_thd->clh_node = curr_thd->clh_prev;
     166}
     167
     168//-----------------------------------------------------------------------------
    103169// Linear backoff Spinlock
    104170struct linear_backoff_then_block_lock {
     
    205271// Fast Block Lock
    206272
    207 // High efficiency minimal blocking lock
     273// minimal blocking lock
    208274// - No reacquire for cond var
    209275// - No recursive acquisition
    210276// - No ownership
    211277struct fast_block_lock {
     278        // List of blocked threads
     279        dlist( thread$ ) blocked_threads;
     280
    212281        // Spin lock used for mutual exclusion
    213282        __spinlock_t lock;
    214283
    215         // List of blocked threads
    216         dlist( thread$ ) blocked_threads;
    217 
     284        // flag showing if lock is held
    218285        bool held:1;
     286
     287        #ifdef __CFA_DEBUG__
     288        // for deadlock detection
     289        struct thread$ * owner;
     290        #endif
    219291};
    220292
     
    231303static inline void lock(fast_block_lock & this) with(this) {
    232304        lock( lock __cfaabi_dbg_ctx2 );
     305
     306        #ifdef __CFA_DEBUG__
     307        assert(!(held && owner == active_thread()));
     308        #endif
    233309        if (held) {
    234310                insert_last( blocked_threads, *active_thread() );
     
    238314        }
    239315        held = true;
     316        #ifdef __CFA_DEBUG__
     317        owner = active_thread();
     318        #endif
    240319        unlock( lock );
    241320}
     
    246325        thread$ * t = &try_pop_front( blocked_threads );
    247326        held = ( t ? true : false );
     327        #ifdef __CFA_DEBUG__
     328        owner = ( t ? t : 0p );
     329        #endif
    248330        unpark( t );
    249331        unlock( lock );
     
    253335static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
    254336static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
     337
     338//-----------------------------------------------------------------------------
     339// simple_owner_lock
     340
     341// pthread owner lock
     342// - reacquire for cond var
     343// - recursive acquisition
     344// - ownership
     345struct simple_owner_lock {
     346        // List of blocked threads
     347        dlist( thread$ ) blocked_threads;
     348
     349        // Spin lock used for mutual exclusion
     350        __spinlock_t lock;
     351
     352        // owner showing if lock is held
     353        struct thread$ * owner;
     354
     355        size_t recursion_count;
     356};
     357
     358static inline void  ?{}( simple_owner_lock & this ) with(this) {
     359        lock{};
     360        blocked_threads{};
     361        owner = 0p;
     362        recursion_count = 0;
     363}
     364static inline void ^?{}( simple_owner_lock & this ) {}
     365static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
     366static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
     367
     368//-----------------------------------------------------------------------------
     369// Spin Queue Lock
     370
     371// - No reacquire for cond var
     372// - No recursive acquisition
     373// - No ownership
     374// - spin lock with no locking/atomics in unlock
     375struct spin_queue_lock {
     376        // Spin lock used for mutual exclusion
     377        mcs_spin_lock lock;
     378
     379        // flag showing if lock is held
     380        bool held:1;
     381
     382        #ifdef __CFA_DEBUG__
     383        // for deadlock detection
     384        struct thread$ * owner;
     385        #endif
     386};
     387
     388static inline void  ?{}( spin_queue_lock & this ) with(this) {
     389        lock{};
     390        held = false;
     391}
     392static inline void ^?{}( spin_queue_lock & this ) {}
     393static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
     394static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
     395
     396// if this is called recursively IT WILL DEADLOCK!!!!!
     397static inline void lock(spin_queue_lock & this) with(this) {
     398        mcs_spin_node node;
     399        #ifdef __CFA_DEBUG__
     400        assert(!(held && owner == active_thread()));
     401        #endif
     402        lock( lock, node );
     403        while(held) Pause();
     404        held = true;
     405        unlock( lock, node );
     406        #ifdef __CFA_DEBUG__
     407        owner = active_thread();
     408        #endif
     409}
     410
     411static inline void unlock(spin_queue_lock & this) with(this) {
     412        #ifdef __CFA_DEBUG__
     413        owner = 0p;
     414        #endif
     415        held = false;
     416}
     417
     418static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
     419static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
     420static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
     421
     422
     423//-----------------------------------------------------------------------------
     424// MCS Block Spin Lock
     425
     426// - No reacquire for cond var
     427// - No recursive acquisition
     428// - No ownership
     429// - Blocks but first node spins (like spin queue but blocking for not first thd)
     430struct mcs_block_spin_lock {
     431        // Spin lock used for mutual exclusion
     432        mcs_lock lock;
     433
     434        // flag showing if lock is held
     435        bool held:1;
     436
     437        #ifdef __CFA_DEBUG__
     438        // for deadlock detection
     439        struct thread$ * owner;
     440        #endif
     441};
     442
     443static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
     444        lock{};
     445        held = false;
     446}
     447static inline void ^?{}( mcs_block_spin_lock & this ) {}
     448static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
     449static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
     450
     451// if this is called recursively IT WILL DEADLOCK!!!!!
     452static inline void lock(mcs_block_spin_lock & this) with(this) {
     453        mcs_node node;
     454        #ifdef __CFA_DEBUG__
     455        assert(!(held && owner == active_thread()));
     456        #endif
     457        lock( lock, node );
     458        while(held) Pause();
     459        held = true;
     460        unlock( lock, node );
     461        #ifdef __CFA_DEBUG__
     462        owner = active_thread();
     463        #endif
     464}
     465
     466static inline void unlock(mcs_block_spin_lock & this) with(this) {
     467        #ifdef __CFA_DEBUG__
     468        owner = 0p;
     469        #endif
     470        held = false;
     471}
     472
     473static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
     474static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
     475static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
     476
     477//-----------------------------------------------------------------------------
     478// Block Spin Lock
     479
     480// - No reacquire for cond var
     481// - No recursive acquisition
     482// - No ownership
     483// - Blocks but first node spins (like spin queue but blocking for not first thd)
     484struct block_spin_lock {
     485        // Spin lock used for mutual exclusion
     486        fast_block_lock lock;
     487
     488        // flag showing if lock is held
     489        bool held:1;
     490
     491        #ifdef __CFA_DEBUG__
     492        // for deadlock detection
     493        struct thread$ * owner;
     494        #endif
     495};
     496
     497static inline void  ?{}( block_spin_lock & this ) with(this) {
     498        lock{};
     499        held = false;
     500}
     501static inline void ^?{}( block_spin_lock & this ) {}
     502static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
     503static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
     504
     505// if this is called recursively IT WILL DEADLOCK!!!!!
     506static inline void lock(block_spin_lock & this) with(this) {
     507        #ifdef __CFA_DEBUG__
     508        assert(!(held && owner == active_thread()));
     509        #endif
     510        lock( lock );
     511        while(held) Pause();
     512        held = true;
     513        unlock( lock );
     514        #ifdef __CFA_DEBUG__
     515        owner = active_thread();
     516        #endif
     517}
     518
     519static inline void unlock(block_spin_lock & this) with(this) {
     520        #ifdef __CFA_DEBUG__
     521        owner = 0p;
     522        #endif
     523        held = false;
     524}
     525
     526static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
     527static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
     528static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
    255529
    256530//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/thread.cfa

    rfc134a48 re5d9274  
    5353        #endif
    5454
    55         seqable.next = 0p;
    56         seqable.back = 0p;
    57 
    5855        node.next = 0p;
    5956        node.prev = 0p;
     57
     58        clh_node = malloc( );
     59        *clh_node = false;
     60
    6061        doregister(curr_cluster, this);
    61 
    6262        monitors{ &self_mon_p, 1, (fptr_t)0 };
    6363}
     
    6767                canary = 0xDEADDEADDEADDEADp;
    6868        #endif
     69        free(clh_node);
    6970        unregister(curr_cluster, this);
    7071        ^self_cor{};
Note: See TracChangeset for help on using the changeset viewer.