Changes in / [720f2fe2:6e2b04e]


Files: 12 added, 7 edited

  • libcfa/src/bits/locks.hfa

--- libcfa/src/bits/locks.hfa	r720f2fe2
+++ libcfa/src/bits/locks.hfa	r6e2b04e
@@ -26,12 +26,4 @@
 	// Wrap in struct to prevent false sharing with debug info
 	volatile bool lock;
-	#ifdef __CFA_DEBUG__
-		// previous function to acquire the lock
-		const char * prev_name;
-		// previous thread to acquire the lock
-		void* prev_thrd;
-		// keep track of number of times we had to spin, just in case the number is unexpectedly huge
-		size_t spin_count;
-	#endif
 };
 
@@ -40,17 +32,9 @@
 		extern void disable_interrupts() OPTIONAL_THREAD;
 		extern void enable_interrupts( bool poll = true ) OPTIONAL_THREAD;
-
-		#ifdef __CFA_DEBUG__
-			void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]);
-		#else
-			#define __cfaabi_dbg_record_lock(x, y)
-		#endif
+		#define __cfaabi_dbg_record_lock(x, y)
 	}
 
 	static inline void ?{}( __spinlock_t & this ) {
 		this.lock = 0;
-		#ifdef __CFA_DEBUG__
-			this.spin_count = 0;
-		#endif
 	}
 
@@ -77,7 +61,4 @@
 	for ( unsigned int i = 1;; i += 1 ) {
 		if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
-		#ifdef __CFA_DEBUG__
-			this.spin_count++;
-		#endif
 		#ifndef NOEXPBACK
 			// exponential spin
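
The acquire path above is a test-and-test-and-set loop: read the flag cheaply first, and only attempt the atomic operation when the lock looks free. A minimal standalone sketch of the same pattern (illustrative names; `Pause()` stands in for the exponential-backoff logic this diff retains):

	volatile bool lock_flag = 0;

	static void acquire() {
		for ( ;; ) {
			// plain read first, so waiters do not ping-pong the cache line with atomic writes
			if ( (lock_flag == 0) && (__atomic_test_and_set( &lock_flag, __ATOMIC_ACQUIRE ) == 0) ) break;
			Pause();
		}
	}

	static void release() {
		__atomic_clear( &lock_flag, __ATOMIC_RELEASE );
	}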
  • libcfa/src/concurrency/invoke.h

--- libcfa/src/concurrency/invoke.h	r720f2fe2
+++ libcfa/src/concurrency/invoke.h	r6e2b04e
@@ -195,10 +195,4 @@
 		struct __monitor_group_t monitors;
 
-		// used to put threads on user data structures
-		struct {
-			struct thread$ * next;
-			struct thread$ * back;
-		} seqable;
-
 		// used to put threads on dlist data structure
 		__cfa_dlink(thread$);
@@ -208,4 +202,10 @@
 			struct thread$ * prev;
 		} node;
+
+		// used to store state between clh lock/unlock
+		volatile bool * clh_prev;
+
+		// used to point to this thd's current clh node
+		volatile bool * clh_node;
 
 		struct processor * last_proc;
@@ -240,20 +240,4 @@
 		}
 
-		static inline thread$ * volatile & ?`next ( thread$ * this ) __attribute__((const)) {
-			return this->seqable.next;
-		}
-
-		static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
-			return this->seqable.back;
-		}
-
-		static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
-			return this->seqable.next;
-		}
-
-		static inline bool listed( thread$ * this ) {
-			return this->seqable.next != 0p;
-		}
-
 		static inline void ?{}(__monitor_group_t & this) {
 			(this.data){0p};
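
The removed seqable links and their Back/Next/listed helpers are superseded by the intrusive dlist links (__cfa_dlink), which the new locks in this changeset drive directly. A sketch of that API as it is used in the diffs below:

	dlist( thread$ ) waiters;                    // intrusive list; the links live inside thread$
	insert_last( waiters, *active_thread() );    // enqueue the current thread, O(1)
	thread$ * t = &try_pop_front( waiters );     // dequeue the head, 0p when empty
	unpark( t );                                 // unpark tolerates 0p, as in fast_block_lock below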
  • libcfa/src/concurrency/kernel.cfa

--- libcfa/src/concurrency/kernel.cfa	r720f2fe2
+++ libcfa/src/concurrency/kernel.cfa	r6e2b04e
@@ -834,17 +834,4 @@
 #endif
 
-
-
-//-----------------------------------------------------------------------------
-// Debug
-__cfaabi_dbg_debug_do(
-	extern "C" {
-		void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]) {
-			this.prev_name = prev_name;
-			this.prev_thrd = kernelTLS().this_thread;
-		}
-	}
-)
-
 //-----------------------------------------------------------------------------
 // Debug
  • libcfa/src/concurrency/locks.cfa

--- libcfa/src/concurrency/locks.cfa	r720f2fe2
+++ libcfa/src/concurrency/locks.cfa	r6e2b04e
@@ -219,4 +219,37 @@
 	// this casts the alarm node to our wrapped type since we used type erasure
 	static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
+
+	struct pthread_alarm_node_wrap {
+		alarm_node_t alarm_node;
+		pthread_cond_var(L) * cond;
+		info_thread(L) * info_thd;
+	};
+
+	void ?{}( pthread_alarm_node_wrap(L) & this, Duration alarm, Duration period, Alarm_Callback callback, pthread_cond_var(L) * c, info_thread(L) * i ) {
+		this.alarm_node{ callback, alarm, period };
+		this.cond = c;
+		this.info_thd = i;
+	}
+
+	void ^?{}( pthread_alarm_node_wrap(L) & this ) { }
+
+	static void timeout_handler ( pthread_alarm_node_wrap(L) & this ) with( this ) {
+		// this handler is called from the kernel, and therefore cannot block, but it can spin
+		lock( cond->lock __cfaabi_dbg_ctx2 );
+
+		// this check is necessary to avoid a race condition since this timeout handler
+		//	may still be called after a thread has been removed from the queue but
+		//	before the alarm is unregistered
+		if ( (*info_thd)`isListed ) {	// is thread on queue
+			info_thd->signalled = false;
+			// remove this thread O(1)
+			remove( *info_thd );
+			on_notify(*info_thd->lock, info_thd->t);
+		}
+		unlock( cond->lock );
+	}
+
+	// this casts the alarm node to our wrapped type since we used type erasure
+	static void pthread_alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (pthread_alarm_node_wrap(L) &)a ); }
 }
 
@@ -388,6 +421,93 @@
 		on_wakeup(*i.lock, recursion_count);
 	}
-}
-
+
+	//-----------------------------------------------------------------------------
+	// pthread_cond_var
+
+	void  ?{}( pthread_cond_var(L) & this ) with(this) {
+		blocked_threads{};
+		lock{};
+	}
+
+	void ^?{}( pthread_cond_var(L) & this ) { }
+
+	bool notify_one( pthread_cond_var(L) & this ) with(this) {
+		lock( lock __cfaabi_dbg_ctx2 );
+		bool ret = ! blocked_threads`isEmpty;
+		if ( ret ) {
+			info_thread(L) & popped = try_pop_front( blocked_threads );
+			on_notify(*popped.lock, popped.t);
+		}
+		unlock( lock );
+		return ret;
+	}
+
+	bool notify_all( pthread_cond_var(L) & this ) with(this) {
+		lock( lock __cfaabi_dbg_ctx2 );
+		bool ret = ! blocked_threads`isEmpty;
+		while( ! blocked_threads`isEmpty ) {
+			info_thread(L) & popped = try_pop_front( blocked_threads );
+			on_notify(*popped.lock, popped.t);
+		}
+		unlock( lock );
+		return ret;
+	}
+
+	uintptr_t front( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
+	bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
+
+	static size_t queue_and_get_recursion( pthread_cond_var(L) & this, info_thread(L) * i ) with(this) {
+		// add info_thread to waiting queue
+		insert_last( blocked_threads, *i );
+		size_t recursion_count = 0;
+		recursion_count = on_wait( *i->lock );
+		return recursion_count;
+	}
+
+	static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
+		lock( lock __cfaabi_dbg_ctx2 );
+		size_t recursion_count = queue_and_get_recursion(this, &info);
+		pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
+		register_self( &node_wrap.alarm_node );
+		unlock( lock );
+
+		// blocks here
+		park();
+
+		// unregisters alarm so it doesn't go off if this happens first
+		unregister_self( &node_wrap.alarm_node );
+
+		// resets recursion count here after waking
+		if (info.lock) on_wakeup(*info.lock, recursion_count);
+	}
+
+	void wait( pthread_cond_var(L) & this, L & l ) with(this) {
+		wait( this, l, 0 );
+	}
+
+	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info ) with(this) {
+		lock( lock __cfaabi_dbg_ctx2 );
+		info_thread( L ) i = { active_thread(), info, &l };
+		size_t recursion_count = queue_and_get_recursion(this, &i);
+		unlock( lock );
+		park( );
+		on_wakeup(*i.lock, recursion_count);
+	}
+
+	#define PTHREAD_WAIT_TIME( u, l, t ) \
+		info_thread( L ) i = { active_thread(), u, l }; \
+		queue_info_thread_timeout(this, i, t, pthread_alarm_node_wrap_cast ); \
+		return i.signalled;
+
+	bool wait( pthread_cond_var(L) & this, L & l, timespec t ) {
+		Duration d = { t };
+		PTHREAD_WAIT_TIME( 0, &l , d )
+	}
+
+	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t ) {
+		Duration d = { t };
+		PTHREAD_WAIT_TIME( info, &l , d )
+	}
+}
 //-----------------------------------------------------------------------------
 // Semaphore
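
The timed wait builds on the alarm wrapper above: queue the waiter, register the alarm, park, then unregister the alarm on wakeup. The timeout handler clears i.signalled, so a false return means the wait timed out rather than being notified. A minimal usage sketch (lock type chosen arbitrarily from this changeset; the timeout value is illustrative):

	simple_owner_lock l;
	pthread_cond_var( simple_owner_lock ) c;

	lock( l );
	timespec t = { 1, 0 };             // illustrative timeout
	bool signalled = wait( c, l, t );  // false => timed out
	// ... handle signalled vs timed-out paths ...
	unlock( l );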
  • libcfa/src/concurrency/locks.hfa

--- libcfa/src/concurrency/locks.hfa	r720f2fe2
+++ libcfa/src/concurrency/locks.hfa	r6e2b04e
@@ -101,4 +101,70 @@
 
 //-----------------------------------------------------------------------------
+// MCS Spin Lock
+// - No recursive acquisition
+// - Needs to be released by owner
+
+struct mcs_spin_node {
+	mcs_spin_node * volatile next;
+	bool locked:1;
+};
+
+struct mcs_spin_queue {
+	mcs_spin_node * volatile tail;
+};
+
+static inline void ?{}(mcs_spin_node & this) { this.next = 0p; this.locked = true; }
+
+static inline mcs_spin_node * volatile & ?`next ( mcs_spin_node * node ) {
+	return node->next;
+}
+
+struct mcs_spin_lock {
+	mcs_spin_queue queue;
+};
+
+static inline void lock(mcs_spin_lock & l, mcs_spin_node & n) {
+	mcs_spin_node * prev = __atomic_exchange_n(&l.queue.tail, &n, __ATOMIC_SEQ_CST);
+	if(prev != 0p) {
+		prev->next = &n;
+		while(n.locked) Pause();
+	}
+}
+
+static inline void unlock(mcs_spin_lock & l, mcs_spin_node & n) {
+	mcs_spin_node * n_ptr = &n;
+	if (!__atomic_compare_exchange_n(&l.queue.tail, &n_ptr, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
+		while (n.next == 0p) {}
+		n.next->locked = false;
+	}
+}
+
+//-----------------------------------------------------------------------------
+// CLH Spinlock
+// - No recursive acquisition
+// - Needs to be released by owner
+
+struct clh_lock {
+	volatile bool * volatile tail;
+};
+
+static inline void  ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
+static inline void ^?{}( clh_lock & this ) { free(this.tail); }
+
+static inline void lock(clh_lock & l) {
+	thread$ * curr_thd = active_thread();
+	*(curr_thd->clh_node) = false;
+	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
+	while(!__atomic_load_n(prev, __ATOMIC_ACQUIRE)) Pause();
+	curr_thd->clh_prev = prev;
+}
+
+static inline void unlock(clh_lock & l) {
+	thread$ * curr_thd = active_thread();
+	__atomic_store_n(curr_thd->clh_node, true, __ATOMIC_RELEASE);
+	curr_thd->clh_node = curr_thd->clh_prev;
+}
+
+//-----------------------------------------------------------------------------
 // Linear backoff Spinlock
 struct linear_backoff_then_block_lock {
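
Usage sketch for the new mcs_spin_lock (each acquisition supplies its own queue node, typically stack-allocated; the critical-section body is illustrative):

	mcs_spin_lock l;

	void critical_section() {
		mcs_spin_node n;     // must stay alive until unlock( l, n ) returns
		lock( l, n );        // swap n into the tail; spin on n.locked behind a predecessor
		// ... critical section ...
		unlock( l, n );      // hand off to n.next, or CAS the tail back to empty
	}

The clh_lock needs no per-call node because each thread carries one: lock() enqueues the thread's clh_node and spins on the predecessor's cell, and unlock() recycles the predecessor's cell via clh_prev (see the thread$ fields added in invoke.h above).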
     
@@ -205,16 +271,22 @@
 // Fast Block Lock
 
-// High efficiency minimal blocking lock
+// minimal blocking lock
 // - No reacquire for cond var
 // - No recursive acquisition
 // - No ownership
 struct fast_block_lock {
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
 	// Spin lock used for mutual exclusion
 	__spinlock_t lock;
 
-	// List of blocked threads
-	dlist( thread$ ) blocked_threads;
-
+	// flag showing if lock is held
 	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
 };
 
     
@@ -231,4 +303,8 @@
 static inline void lock(fast_block_lock & this) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
+
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
 	if (held) {
 		insert_last( blocked_threads, *active_thread() );
     
@@ -238,4 +314,7 @@
 	}
 	held = true;
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
 	unlock( lock );
 }
     
@@ -246,4 +325,7 @@
 	thread$ * t = &try_pop_front( blocked_threads );
 	held = ( t ? true : false );
+	#ifdef __CFA_DEBUG__
+	owner = ( t ? t : 0p );
+	#endif
 	unpark( t );
 	unlock( lock );
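
The owner field added here is debug-only bookkeeping: since fast_block_lock is not recursive, a repeated acquire from the owning thread would block forever, and the new assert turns that into an immediate failure. Illustrative, debug builds only:

	fast_block_lock l;
	lock( l );
	lock( l );   // asserts: held && owner == active_thread()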
     
@@ -253,4 +335,274 @@
 static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
 static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
+// simple_owner_lock
+
+// pthread owner lock
+// - reacquire for cond var
+// - recursive acquisition
+// - ownership
+struct simple_owner_lock {
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
+	// Spin lock used for mutual exclusion
+	__spinlock_t lock;
+
+	// owner showing if lock is held
+	struct thread$ * owner;
+
+	size_t recursion_count;
+};
+
+static inline void  ?{}( simple_owner_lock & this ) with(this) {
+	lock{};
+	blocked_threads{};
+	owner = 0p;
+	recursion_count = 0;
+}
+static inline void ^?{}( simple_owner_lock & this ) {}
+static inline void ?{}( simple_owner_lock & this, simple_owner_lock this2 ) = void;
+static inline void ?=?( simple_owner_lock & this, simple_owner_lock this2 ) = void;
+
+static inline void lock(simple_owner_lock & this) with(this) {
+	if (owner == active_thread()) {
+		recursion_count++;
+		return;
+	}
+	lock( lock __cfaabi_dbg_ctx2 );
+
+	if (owner != 0p) {
+		insert_last( blocked_threads, *active_thread() );
+		unlock( lock );
+		park( );
+		return;
+	}
+	owner = active_thread();
+	recursion_count = 1;
+	unlock( lock );
+}
+
+// TODO: fix duplicate def issue and bring this back
+// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
+//	thread$ * t = &try_pop_front( blocked_threads );
+//	owner = t;
+//	recursion_count = ( t ? 1 : 0 );
+//	unpark( t );
+// }
+
+static inline void unlock(simple_owner_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
+	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
+	// if recursion count is zero release lock and set new owner if one is waiting
+	recursion_count--;
+	if ( recursion_count == 0 ) {
+		// pop_and_set_new_owner( this );
+		thread$ * t = &try_pop_front( blocked_threads );
+		owner = t;
+		recursion_count = ( t ? 1 : 0 );
+		unpark( t );
+	}
+	unlock( lock );
+}
+
+static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	// lock held
+	if ( owner != 0p ) {
+		insert_last( blocked_threads, *t );
+		unlock( lock );
+	}
+	// lock not held
+	else {
+		owner = t;
+		recursion_count = 1;
+		unpark( t );
+		unlock( lock );
+	}
+}
+
+static inline size_t on_wait(simple_owner_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
+	/* paranoid */ verifyf( owner == active_thread(), "Thread %p other than the owner %p attempted to release owner lock %p", owner, active_thread(), &this );
+
+	size_t ret = recursion_count;
+
+	// pop_and_set_new_owner( this );
+
+	thread$ * t = &try_pop_front( blocked_threads );
+	owner = t;
+	recursion_count = ( t ? 1 : 0 );
+	unpark( t );
+
+	unlock( lock );
+	return ret;
+}
+
+static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+
+//-----------------------------------------------------------------------------
+// Spin Queue Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - spin lock with no locking/atomics in unlock
+struct spin_queue_lock {
+	// Spin lock used for mutual exclusion
+	mcs_spin_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void  ?{}( spin_queue_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( spin_queue_lock & this ) {}
+static inline void ?{}( spin_queue_lock & this, spin_queue_lock this2 ) = void;
+static inline void ?=?( spin_queue_lock & this, spin_queue_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(spin_queue_lock & this) with(this) {
+	mcs_spin_node node;
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock, node );
+	while(held) Pause();
+	held = true;
+	unlock( lock, node );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(spin_queue_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(spin_queue_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(spin_queue_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(spin_queue_lock & this, size_t recursion ) { }
+
+
+//-----------------------------------------------------------------------------
+// MCS Block Spin Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - Blocks, but the head node spins (like spin_queue_lock, except non-head threads block instead of spinning)
+struct mcs_block_spin_lock {
+	// Spin lock used for mutual exclusion
+	mcs_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void  ?{}( mcs_block_spin_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( mcs_block_spin_lock & this ) {}
+static inline void ?{}( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
+static inline void ?=?( mcs_block_spin_lock & this, mcs_block_spin_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(mcs_block_spin_lock & this) with(this) {
+	mcs_node node;
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock, node );
+	while(held) Pause();
+	held = true;
+	unlock( lock, node );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(mcs_block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(mcs_block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(mcs_block_spin_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(mcs_block_spin_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
+// Block Spin Lock
+
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+// - Blocks, but the head thread spins (like spin_queue_lock, except waiters block instead of spinning)
+struct block_spin_lock {
+	// Spin lock used for mutual exclusion
+	fast_block_lock lock;
+
+	// flag showing if lock is held
+	bool held:1;
+
+	#ifdef __CFA_DEBUG__
+	// for deadlock detection
+	struct thread$ * owner;
+	#endif
+};
+
+static inline void  ?{}( block_spin_lock & this ) with(this) {
+	lock{};
+	held = false;
+}
+static inline void ^?{}( block_spin_lock & this ) {}
+static inline void ?{}( block_spin_lock & this, block_spin_lock this2 ) = void;
+static inline void ?=?( block_spin_lock & this, block_spin_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	assert(!(held && owner == active_thread()));
+	#endif
+	lock( lock );
+	while(held) Pause();
+	held = true;
+	unlock( lock );
+	#ifdef __CFA_DEBUG__
+	owner = active_thread();
+	#endif
+}
+
+static inline void unlock(block_spin_lock & this) with(this) {
+	#ifdef __CFA_DEBUG__
+	owner = 0p;
+	#endif
+	held = false;
+}
+
+static inline void on_notify(block_spin_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(block_spin_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(block_spin_lock & this, size_t recursion ) { }
 
 //-----------------------------------------------------------------------------
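
Of the new locks, only simple_owner_lock tracks ownership and allows recursion; the count is saved across a condition-variable wait by on_wait/on_wakeup. A brief sketch of the intended behaviour:

	simple_owner_lock l;

	lock( l );       // owner = this thread, recursion_count = 1
	lock( l );       // same owner, recursion_count = 2
	unlock( l );     // recursion_count = 1, still held
	unlock( l );     // recursion_count = 0, handed to the next waiter (if any)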
     
@@ -332,8 +684,8 @@
 	// - signalling without holding branded lock is UNSAFE!
 	// - only allows usage of one lock, cond var is branded after usage
+
 	struct fast_cond_var {
 		// List of blocked threads
 		dlist( info_thread(L) ) blocked_threads;
-
 		#ifdef __CFA_DEBUG__
 		L * lock_used;
     
@@ -341,5 +693,4 @@
 	};
 
-
 	void  ?{}( fast_cond_var(L) & this );
 	void ^?{}( fast_cond_var(L) & this );
     
@@ -349,8 +700,33 @@
 
 	uintptr_t front( fast_cond_var(L) & this );
-
 	bool empty  ( fast_cond_var(L) & this );
 
 	void wait( fast_cond_var(L) & this, L & l );
 	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
-}
+
+
+	//-----------------------------------------------------------------------------
+	// pthread_cond_var
+	//
+	// - cond var with minimal footprint
+	// - supports operations needed for pthread cond
+
+	struct pthread_cond_var {
+		dlist( info_thread(L) ) blocked_threads;
+		__spinlock_t lock;
+	};
+
+	void  ?{}( pthread_cond_var(L) & this );
+	void ^?{}( pthread_cond_var(L) & this );
+
+	bool notify_one( pthread_cond_var(L) & this );
+	bool notify_all( pthread_cond_var(L) & this );
+
+	uintptr_t front( pthread_cond_var(L) & this );
+	bool empty ( pthread_cond_var(L) & this );
+
+	void wait( pthread_cond_var(L) & this, L & l );
+	void wait( pthread_cond_var(L) & this, L & l, uintptr_t info );
+	bool wait( pthread_cond_var(L) & this, L & l, timespec t );
+	bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t );
+}
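
All of these condition variables manipulate their lock only through the three hooks seen throughout this changeset, so any lock providing them can serve as L. Schematically (a sketch of the protocol as used in the diffs, not libcfa's trait definition):

	// waiter, inside wait():
	//    size_t rec = on_wait( lock );  // release the lock; save recursion depth
	//    park();                        // block until notified or timed out
	//    on_wakeup( lock, rec );        // restore state once the lock is held again
	// notifier, inside notify_one/notify_all:
	//    on_notify( lock, t );          // hand the lock to t or make t runnable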
  • libcfa/src/concurrency/thread.cfa

--- libcfa/src/concurrency/thread.cfa	r720f2fe2
+++ libcfa/src/concurrency/thread.cfa	r6e2b04e
@@ -53,11 +53,11 @@
 	#endif
 
-	seqable.next = 0p;
-	seqable.back = 0p;
-
 	node.next = 0p;
 	node.prev = 0p;
+
+	clh_node = malloc( );
+	*clh_node = false;
+
 	doregister(curr_cluster, this);
-
 	monitors{ &self_mon_p, 1, (fptr_t)0 };
 }
@@ -67,4 +67,5 @@
 		canary = 0xDEADDEADDEADDEADp;
 	#endif
+	free(clh_node);
 	unregister(curr_cluster, this);
 	^self_cor{};
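
The malloc/free pair gives every thread exactly one CLH cell for its lifetime; the cell a thread spins on and the cell it owns swap roles on each unlock. A sketch of the invariant (see clh_lock in locks.hfa):

	// ?{}(thread$):   clh_node = malloc(); *clh_node = false;  // own one cell
	// lock(l):        publish clh_node at the tail; spin on the predecessor's cell (clh_prev)
	// unlock(l):      *clh_node = true;      // successor may enter
	//                 clh_node = clh_prev;   // adopt the predecessor's retired cell
	// ^?{}(thread$):  free(clh_node);        // still exactly one cell per thread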
  • libcfa/src/startup.cfa

--- libcfa/src/startup.cfa	r720f2fe2
+++ libcfa/src/startup.cfa	r6e2b04e
@@ -63,7 +63,4 @@
 
 struct __spinlock_t;
-extern "C" {
-	void __cfaabi_dbg_record_lock(struct __spinlock_t & this, const char prev_name[]) __attribute__(( weak )) libcfa_public {}
-}
 
 // Local Variables: //