Ignore:
Timestamp:
Jun 2, 2022, 2:39:11 PM (2 years ago)
Author:
caparsons <caparson@…>
Branches:
ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
Children:
fb63c70
Parents:
e5628db
Message:

Added some locks and cleaned up the unused seqable field in the thread block.

Location:
libcfa/src/concurrency
Files:
3 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/invoke.h

    re5628db rf835806  
    195195                struct __monitor_group_t monitors;
    196196
    197                 // used to put threads on user data structures
    198                 struct {
    199                         struct thread$ * next;
    200                         struct thread$ * back;
    201                 } seqable;
    202 
    203197                // used to put threads on dlist data structure
    204198                __cfa_dlink(thread$);
     
    208202                        struct thread$ * prev;
    209203                } node;
     204
     205                // used to store state between clh lock/unlock
     206                volatile bool * clh_prev;
     207
     208                // used to point to this thd's current clh node
     209                volatile bool * clh_node;
    210210
    211211                struct processor * last_proc;
     
    240240                }
    241241
    242                 static inline thread$ * volatile & ?`next ( thread$ * this )  __attribute__((const)) {
    243                         return this->seqable.next;
    244                 }
    245 
    246                 static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
    247                         return this->seqable.back;
    248                 }
    249 
    250                 static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
    251                                 return this->seqable.next;
    252                 }
    253 
    254                 static inline bool listed( thread$ * this ) {
    255                         return this->seqable.next != 0p;
    256                 }
    257 
    258242                static inline void ?{}(__monitor_group_t & this) {
    259243                        (this.data){0p};
  • libcfa/src/concurrency/locks.hfa

    re5628db rf835806  
    101101
    102102//-----------------------------------------------------------------------------
     103// MCS Spin Lock
     104// - No recursive acquisition
     105// - Needs to be released by owner
     106
     107struct mcs_spin_node {
     108        mcs_spin_node * volatile next;
     109        bool locked:1;
     110};
     111
     112struct mcs_spin_queue {
     113        mcs_spin_node * volatile tail;
     114};
     115
     116static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
     117
     118static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
     119        return node->next;
     120}
     121
     122struct mcs_spin_lock {
     123        mcs_spin_queue queue;
     124};
     125
     126static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
     127        mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
     128        if(prev != 0p) {
     129                prev->next = &n;
     130                while(n.locked) Pause();
     131        }
     132}
     133
     134static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
     135        mcs_spin_node * n_ptr = &n;
     136        if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     137                while (n.next == 0p) {}
     138                n.next->locked = false;
     139        }
     140}
     141
     142//-----------------------------------------------------------------------------
     143// CLH Spinlock
     144// - No recursive acquisition
     145// - Needs to be released by owner
     146
     147struct clh_lock {
     148        volatile bool * volatile tail;
     149};
     150
     151static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
     152static inline void ^?{}( clh_lock & this ) { free(this.tail); }
     153
     154static inline void lock(clh_lock & l) {
     155        thread$ * curr_thd = active_thread();
     156        *(curr_thd->clh_node) = false;
     157        volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
     158        while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
     159        curr_thd->clh_prev = prev;
     160}
     161
     162static inline void unlock(clh_lock & l) {
     163        thread$ * curr_thd = active_thread();
     164        __atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
     165        curr_thd->clh_node = curr_thd->clh_prev;
     166}
     167
     168//-----------------------------------------------------------------------------
    103169// Linear backoff Spinlock
    104170struct linear_backoff_then_block_lock {
     
    205271// Fast Block Lock
    206272
    207 // High efficiency minimal blocking lock
     273// minimal blocking lock
    208274// - No reacquire for cond var
    209275// - No recursive acquisition
    210276// - No ownership
    211277struct fast_block_lock {
     278        // List of blocked threads
     279        dlist( thread$ ) blocked_threads;
     280
    212281        // Spin lock used for mutual exclusion
    213282        __spinlock_t lock;
    214283
    215         // List of blocked threads
    216         dlist( thread$ ) blocked_threads;
    217 
     284        // flag showing if lock is held
    218285        bool held:1;
     286
     287        #ifdef __CFA_DEBUG__
     288        // for deadlock detection
     289        struct thread$ * owner;
     290        #endif
    219291};
    220292
     
    231303static inline void lock(fast_block_lock & this) with(this) {
    232304        lock( lock __cfaabi_dbg_ctx2 );
     305
     306        #ifdef __CFA_DEBUG__
     307        assert(!(held && owner == active_thread()));
     308        #endif
    233309        if (held) {
    234310                insert_last( blocked_threads, *active_thread() );
     
    238314        }
    239315        held = true;
     316        #ifdef __CFA_DEBUG__
     317        owner = active_thread();
     318        #endif
    240319        unlock( lock );
    241320}
     
    246325        thread$ * t = &try_pop_front( blocked_threads );
    247326        held = ( t ? true : false );
     327        #ifdef __CFA_DEBUG__
     328        owner = ( t ? t : 0p );
     329        #endif
    248330        unpark( t );
    249331        unlock( lock );
     
    253335static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
    254336static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
     337
     338//-----------------------------------------------------------------------------
     339// simple_owner_lock
     340
     341// pthread owner lock
     342// - reacquire for cond var
     343// - recursive acquisition
     344// - ownership
     345struct simple_owner_lock {
     346        // List of blocked threads
     347        dlist( thread$ ) blocked_threads;
     348
     349        // Spin lock used for mutual exclusion
     350        __spinlock_t lock;
     351
     352        // owner showing if lock is held
     353        struct thread$ * owner;
     354
     355        size_t recursion_count;
     356};
     357
     358static inline void  ?{}( simple_owner_lock & this ) with(this) {
     359        lock{};
     360        blocked_threads{};
     361        owner = 0p;
     362        recursion_count = 0;
     363}
     364static inline void ^?{}( simple_owner_lock & this ) {}
     365static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
     366static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
     367
     368static inline void lock(simple_owner_lock & this) with(this) {
     369        if (owner == active_thread()) {
     370                recursion_count++;
     371                return;
     372        }
     373        lock( lock __cfaabi_dbg_ctx2 );
     374
     375        if (owner != 0p) {
     376                insert_last( blocked_threads, *active_thread() );
     377                unlock( lock );
     378                park( );
     379                return;
     380        }
     381        owner = active_thread();
     382        recursion_count = 1;
     383        unlock( lock );
     384}
     385
     386void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
     387        thread$ * t = &try_pop_front( blocked_threads );
     388        owner = t;
     389        recursion_count = ( t ? 1 : 0 );
     390        unpark( t );
     391}
     392
     393static inline void unlock(simple_owner_lock & this) with(this) {
     394        lock( lock __cfaabi_dbg_ctx2 );
     395        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     396        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
     397        // if recursion count is zero release lock and set new owner if one is waiting
     398        recursion_count--;
     399        if ( recursion_count == 0 ) {
     400                pop_and_set_new_owner( this );
     401        }
     402        unlock( lock );
     403}
     404
     405static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
     406        lock( lock __cfaabi_dbg_ctx2 );
     407        // lock held
     408        if ( owner != 0p ) {
     409                insert_last( blocked_threads, *t );
     410                unlock( lock );
     411        }
     412        // lock not held
     413        else {
     414                owner = t;
     415                recursion_count = 1;
     416                unpark( t );
     417                unlock( lock );
     418        }
     419}
     420
     421static inline size_t on_wait(simple_owner_lock & this) with(this) {
     422        lock( lock __cfaabi_dbg_ctx2 );
     423        /* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
     424        /* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
     425
     426        size_t ret = recursion_count;
     427
     428        pop_and_set_new_owner( this );
     429
     430        unlock( lock );
     431        return ret;
     432}
     433
     434static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
     435
     436//-----------------------------------------------------------------------------
     437// Spin Queue Lock
     438
     439// - No reacquire for cond var
     440// - No recursive acquisition
     441// - No ownership
     442// - spin lock with no locking/atomics in unlock
     443struct spin_queue_lock {
     444        // Spin lock used for mutual exclusion
     445        mcs_spin_lock lock;
     446
     447        // flag showing if lock is held
     448        bool held:1;
     449
     450        #ifdef __CFA_DEBUG__
     451        // for deadlock detection
     452        struct thread$ * owner;
     453        #endif
     454};
     455
     456static inline void  ?{}( spin_queue_lock & this ) with(this) {
     457        lock{};
     458        held = false;
     459}
     460static inline void ^?{}( spin_queue_lock & this ) {}
     461static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
     462static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
     463
     464// if this is called recursively IT WILL DEADLOCK!!!!!
     465static inline void lock(spin_queue_lock & this) with(this) {
     466        mcs_spin_node node;
     467        #ifdef __CFA_DEBUG__
     468        assert(!(held && owner == active_thread()));
     469        #endif
     470        lock( lock, node );
     471        while(held) Pause();
     472        held = true;
     473        unlock( lock, node );
     474        #ifdef __CFA_DEBUG__
     475        owner = active_thread();
     476        #endif
     477}
     478
     479static inline void unlock(spin_queue_lock & this) with(this) {
     480        #ifdef __CFA_DEBUG__
     481        owner = 0p;
     482        #endif
     483        held = false;
     484}
     485
     486static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
     487static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
     488static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
     489
     490
     491//-----------------------------------------------------------------------------
     492// MCS Block Spin Lock
     493
     494// - No reacquire for cond var
     495// - No recursive acquisition
     496// - No ownership
     497// - Blocks but first node spins (like spin queue but blocking for not first thd)
     498struct mcs_block_spin_lock {
     499        // Spin lock used for mutual exclusion
     500        mcs_lock lock;
     501
     502        // flag showing if lock is held
     503        bool held:1;
     504
     505        #ifdef __CFA_DEBUG__
     506        // for deadlock detection
     507        struct thread$ * owner;
     508        #endif
     509};
     510
     511static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
     512        lock{};
     513        held = false;
     514}
     515static inline void ^?{}( mcs_block_spin_lock & this ) {}
     516static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
     517static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
     518
     519// if this is called recursively IT WILL DEADLOCK!!!!!
     520static inline void lock(mcs_block_spin_lock & this) with(this) {
     521        mcs_node node;
     522        #ifdef __CFA_DEBUG__
     523        assert(!(held && owner == active_thread()));
     524        #endif
     525        lock( lock, node );
     526        while(held) Pause();
     527        held = true;
     528        unlock( lock, node );
     529        #ifdef __CFA_DEBUG__
     530        owner = active_thread();
     531        #endif
     532}
     533
     534static inline void unlock(mcs_block_spin_lock & this) with(this) {
     535        #ifdef __CFA_DEBUG__
     536        owner = 0p;
     537        #endif
     538        held = false;
     539}
     540
     541static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
     542static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
     543static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
     544
     545//-----------------------------------------------------------------------------
     546// Block Spin Lock
     547
     548// - No reacquire for cond var
     549// - No recursive acquisition
     550// - No ownership
     551// - Blocks but first node spins (like spin queue but blocking for not first thd)
     552struct block_spin_lock {
     553        // Spin lock used for mutual exclusion
     554        fast_block_lock lock;
     555
     556        // flag showing if lock is held
     557        bool held:1;
     558
     559        #ifdef __CFA_DEBUG__
     560        // for deadlock detection
     561        struct thread$ * owner;
     562        #endif
     563};
     564
     565static inline void  ?{}( block_spin_lock & this ) with(this) {
     566        lock{};
     567        held = false;
     568}
     569static inline void ^?{}( block_spin_lock & this ) {}
     570static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
     571static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
     572
     573// if this is called recursively IT WILL DEADLOCK!!!!!
     574static inline void lock(block_spin_lock & this) with(this) {
     575        #ifdef __CFA_DEBUG__
     576        assert(!(held && owner == active_thread()));
     577        #endif
     578        lock( lock );
     579        while(held) Pause();
     580        held = true;
     581        unlock( lock );
     582        #ifdef __CFA_DEBUG__
     583        owner = active_thread();
     584        #endif
     585}
     586
     587static inline void unlock(block_spin_lock & this) with(this) {
     588        #ifdef __CFA_DEBUG__
     589        owner = 0p;
     590        #endif
     591        held = false;
     592}
     593
     594static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
     595static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
     596static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
    255597
    256598//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/thread.cfa

    re5628db rf835806  
    5050        #endif
    5151
    52         seqable.next = 0p;
    53         seqable.back = 0p;
    54 
    5552        node.next = 0p;
    5653        node.prev = 0p;
     54
     55        clh_node = new( false );
     56
    5757        doregister(curr_cluster, this);
    58 
    5958        monitors{ &self_mon_p, 1, (fptr_t)0 };
    6059}
     
    6463                canary = 0xDEADDEADDEADDEADp;
    6564        #endif
     65        delete(clh_node);
    6666        unregister(curr_cluster, this);
    6767        ^self_cor{};
Note: See TracChangeset for help on using the changeset viewer.