Changes in / [f85de47:65bd3c2]


Ignore:
Files:
33 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/bits/collection.hfa

    rf85de47 r65bd3c2  
    2626        // PUBLIC
    2727
    28         void ?{}( Colable & co ) with( co ) {
    29                 next = 0p;
     28        void ?{}( Colable & co ) {
     29                co.next = 0p;
    3030        } // post: ! listed()
    3131
    3232        // return true iff *this is an element of a collection
    33         bool listed( Colable & co ) with( co ) {                        // pre: this != 0
    34                 return next != 0p;
     33        bool listed( Colable & co ) {                                           // pre: this != 0
     34                return co.next != 0p;
    3535        }
    3636
  • libcfa/src/bits/queue.hfa

    rf85de47 r65bd3c2  
    2424                Queue(T) & ?=?( const Queue(T) & ) = void;              // no assignment
    2525
    26                 void ?{}( Queue(T) & q ) with( q ) {
     26                void ?{}( Queue(T) & q ) {
    2727                        ((Collection &)q){};
    28                         last = 0p;
     28                        q.last = 0p;
    2929                } // post: empty()
    3030
    31                 T & tail( Queue(T) & q ) with( q ) {
    32                         return *last;
     31                T & tail( Queue(T) & q ) {
     32                        return *q.last;
    3333                }
    3434
     
    4646                        if ( listed( &n ) ) abort( "(Queue &)%p.addHead( %p ) : Node is already on another list.", &q, &n );
    4747                        #endif // __CFA_DEBUG__
    48                         if ( last ) {
     48                        if ( q.last ) {
    4949                                Next( &n ) = &head( q );
    5050                                q.root = &n;
     
    6060                        if ( listed( &n ) ) abort( "(Queue &)%p.addTail( %p ) : Node is already on another list.", &q, &n );
    6161                        #endif // __CFA_DEBUG__
    62                         if ( last ) Next( last ) = &n;
     62                        if ( q.last ) Next( last ) = &n;
    6363                        else root = &n;
    6464                        last = &n;
  • libcfa/src/collections/list.hfa

    rf85de47 r65bd3c2  
    1010// Created On       : Wed Apr 22 18:00:00 2020
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sun Apr 20 19:04:50 2025
    13 // Update Count     : 51
     12// Last Modified On : Thu Apr 24 18:12:59 2025
     13// Update Count     : 72
    1414//
    1515
     
    7272
    7373// The origin is the position encountered at the start of iteration, signifying, "need to advance to the first element,"
    74 // and at the end of iteration, signifying, "no more elements."  Normal comsumption of an iterator runs ?`moveNext as
    75 // the first step, and uses the return of ?`moveNext as a guard, before dereferencing the iterator.  So normal
     74// and at the end of iteration, signifying, "no more elements."  Normal comsumption of an iterator runs "advance" as
     75// the first step, and uses the return of "advance" as a guard, before dereferencing the iterator.  So normal
    7676// consumption of an iterator does not dereference an iterator in origin position.  The value of a pointer (underlying a
    7777// refence) that is exposed publicly as an iteraor, and also a pointer stored internally in a link field, is tagged, to
     
    128128
    129129static inline forall( tE &, tLinks & | embedded( tE, tLinks, dlink( tE ) ) ) {
     130        bool isListed( tE & node ) {
     131                verify( &node != 0p );
     132                dlink( tE ) & node_links = node`inner;
     133                return (node_links.prev != 0p) || (node_links.next != 0p);
     134        }
     135
     136        bool isEmpty( dlist( tE, tLinks ) & list ) {
     137                tE * firstPtr = list.next;
     138                if ( ORIGIN_TAG_QUERY(( size_t)firstPtr) ) firstPtr = 0p;
     139                return firstPtr == 0p;
     140        }
     141
     142        tE & first( dlist( tE, tLinks ) & list ) {
     143                tE * firstPtr = list.next;
     144                if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
     145                return *firstPtr;
     146        }
     147
     148        tE & last( dlist( tE, tLinks ) & list ) {
     149                tE * lastPtr = list.prev;
     150                if ( ORIGIN_TAG_QUERY( (size_t)lastPtr) ) lastPtr = 0p;
     151                return *lastPtr;
     152        }
     153
    130154        tE & insert_before( tE & before, tE & node ) {
    131155                verify( &before != 0p );
     
    194218        }
    195219
    196         tE & ?`first( dlist( tE, tLinks ) & list ) {
    197                 tE * firstPtr = list.next;
    198                 if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
    199                 return *firstPtr;
    200         }
    201 
    202         tE & ?`last( dlist( tE, tLinks ) & list ) {
    203                 tE * lastPtr = list.prev;
    204                 if ( ORIGIN_TAG_QUERY( (size_t)lastPtr) ) lastPtr = 0p;
    205                 return *lastPtr;
    206         }
    207 
    208         bool ?`isEmpty( dlist( tE, tLinks ) & list ) {
    209                 tE * firstPtr = list.next;
    210                 if ( ORIGIN_TAG_QUERY(( size_t)firstPtr) ) firstPtr = 0p;
    211                 return firstPtr == 0p;
    212         }
    213 
    214         bool ?`isListed( tE & node ) {
    215                 verify( &node != 0p );
    216                 dlink( tE ) & node_links = node`inner;
    217                 return (node_links.prev != 0p) || (node_links.next != 0p);
    218         }
    219 
    220         tE & ?`elems( dlist( tE, tLinks ) & list ) {
     220        tE & iter( dlist( tE, tLinks ) & list ) {
    221221                tE * origin = $get_list_origin_addr( list );
    222222                return *origin;
    223223        }
    224         tE & ?`head( dlist( tE, tLinks ) & list ) {
    225                 return list`elems;
    226         }
    227 
    228         bool ?`moveNext( tE && refx ) {
     224
     225        bool recede( tE && refx ) {
     226                tE && ref_inner = refx;
     227                tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
     228                &ref_inner = oldReferent`inner.prev;
     229                return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
     230        }
     231
     232        bool advance( tE && refx ) {
    229233                tE && ref_inner = refx;
    230234                tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
     
    232236                return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
    233237        }
    234         bool ?`next( tE && refx ) {                                                     // alternate name
    235                 return refx`moveNext;
    236         }
    237 
    238         bool ?`movePrev( tE && refx ) {
    239                 tE && ref_inner = refx;
    240                 tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
    241                 &ref_inner = oldReferent`inner.prev;
    242                 return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
    243         }
    244         bool ?`prev( tE && refx ) {                                                     // alternate name
    245                 return refx`movePrev;
    246         }
    247 
    248         bool ?`hasNext( tE & node ) {
    249                 return node`moveNext;
    250         }
    251 
    252         bool ?`hasPrev( tE & node ) {
    253                 return node`movePrev;
    254         }
    255 
    256         tE & ?`next( tE & node ) {
    257                 if ( node`moveNext ) return node;
     238
     239    bool isFirst( tE & node ) {
     240        return recede( node );
     241    }
     242
     243    bool isLast( tE & node ) {
     244        return advance( node );
     245    }
     246
     247        tE & prev( tE & node ) {
     248                if ( recede( node ) ) return node;
    258249                return *0p;
    259250        }
    260251
    261         tE & ?`prev( tE & node ) {
    262                 if ( node`movePrev ) return node;
     252        tE & next( tE & node ) {
     253                if ( advance( node ) ) return node;
    263254                return *0p;
    264255        }
    265256
    266257        tE & insert_first( dlist( tE, tLinks ) & list, tE & node ) {
    267                 insert_after( list`elems, node );
     258                insert_after( iter( list ), node );
    268259                return node;
    269260        }
    270261
    271262        tE & insert_last( dlist( tE, tLinks ) & list, tE & node ) {
    272                 insert_before( list`elems, node );
    273                 return node;
    274         }
    275         tE &  insert( dlist( tE, tLinks ) & list, tE & node ) { // alternate name
     263                insert_before( iter( list ), node );
     264                return node;
     265        }
     266        tE & insert( dlist( tE, tLinks ) & list, tE & node ) { // synonym for insert_last
    276267                insert_last( list, node );
    277268                return node;
     
    279270
    280271        tE & remove_first( dlist( tE, tLinks ) & list ) {
    281                 return remove( list`first );
     272                tE & first_node = first( list );
     273                if ( &first_node ) return remove( first_node );
     274                return first_node;
    282275        }
    283276
    284277        tE & remove_last( dlist( tE, tLinks ) & list ) {
    285                 return remove( list`last );
     278                tE & last_node = last( list );
     279                if ( &last_node ) return remove( last_node );
     280                return last_node;
    286281        }
    287282
     
    322317//      }
    323318
    324         tE & try_pop_front( dlist( tE, tLinks ) & list ) {
    325                 tE & first_inlist = list`first;
    326                 tE & first_item = first_inlist;
    327                 if ( &first_item ) remove( first_inlist );
    328                 return first_item;
    329         }
    330 
    331         tE & try_pop_back( dlist( tE, tLinks ) & list ) {
    332                 tE & last_inlist = list`last;
    333                 tE & last_item = last_inlist;
    334                 if ( &last_item ) remove( last_inlist );
    335                 return last_item;
    336         }
    337 
    338 
    339319        #if ! defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
    340320        bool $validate_fwd( dlist( tE, tLinks ) & this ) {
    341                 if ( ! & this`first ) return &this`last == 0p;
     321                if ( ! & first( this ) ) return &last( this ) == 0p;
    342322
    343323                tE & lagElem = *0p;
    344                 while ( tE & it = this`elems; it`moveNext ) {
    345                         if ( & lagElem == 0p &&  &it != & this`first ) return false;
     324                while ( tE & it = iter( this ); advance( it ) ) {
     325                        if ( & lagElem == 0p &&  &it != & first( this ) ) return false;
    346326                        &lagElem = ⁢
    347327                }
    348328
    349                 if ( &lagElem != &this`last ) return false;
    350 
    351                 // TODO: verify that it is back at this`elems;
     329                if ( &lagElem != &last( this ) ) return false;
     330
     331                // TODO: verify that it is back at iter( this );
    352332                return true;
    353333        }
    354334
    355335        bool $validate_rev( dlist( tE, tLinks ) & this ) {
    356                 if ( ! & this`last ) return &this`first == 0p;
     336                if ( ! & last( this ) ) return &first( this ) == 0p;
    357337
    358338                tE & lagElem = *0p;
    359                 while ( tE & it = this`elems; it`movePrev ) {
    360                         if ( &lagElem == 0p && &it != & this`last ) return false;
     339                while ( tE & it = iter( this ); recede( it ) ) {
     340                        if ( &lagElem == 0p && &it != & last( this ) ) return false;
    361341                        &lagElem = ⁢
    362342                }
    363343
    364                 if ( &lagElem != &this`first ) return false;
    365 
    366                 // TODO: verify that it is back at this`elems;
     344                if ( &lagElem != &first( this ) ) return false;
     345
     346                // TODO: verify that it is back at iter( this );
    367347                return true;
    368348        }
     
    375355
    376356// TEMPORARY, until foreach statement created.
    377 #define FOREACH( list, index ) for ( typeof((list)`head) & (index) = (list)`head; (index)`next; )
    378 #define FOREACH_REV( list, index ) for ( typeof((list)`head) & (index) = (list)`head; (index)`prev; )
    379 #define FOREACH_COND( list, index, expr ) for ( typeof((list)`head) & (index) = (list)`head; (index)`next && !(expr); )
    380 #define FOREACH_REV_COND( list, index, expr ) for ( typeof((list)`head) & (index) = (list)`head; (index)`prev && !(expr); )
     357#define FOREACH( list, index ) for ( typeof(iter( list )) & (index) = iter( list ); advance( index ); )
     358#define FOREACH_REV( list, index ) for ( typeof(iter( list )) & (index) = iter( list ); recede( index ); )
     359#define FOREACH_COND( list, index, expr ) for ( typeof(iter( list )) & (index) = iter( list ); advance( index ) && !(expr); )
     360#define FOREACH_REV_COND( list, index, expr ) for ( typeof(iter( list )) & (index) = iter( list ); recede( index ) && !(expr); )
  • libcfa/src/collections/lockfree.hfa

    rf85de47 r65bd3c2  
    1616        };
    1717
    18         static inline void ?{}(mcs_queue(T) & this) { this.tail = 0p; }
    19         static inline bool empty(const mcs_queue(T) & this) { return !this.tail; }
    20 
    21         static inline forall(| { T * volatile & ?`next ( T * ); })
    22         {
     18        static inline void ?{}( mcs_queue(T) & this ) { this.tail = 0p; }
     19        static inline bool empty( const mcs_queue(T) & this ) { return ! this.tail; }
     20
     21        static inline forall( | { T * volatile & next ( T * ); }) {
    2322                // Adds an element to the list
    2423                // Multi-Thread Safe, Lock-Free
    25                 T * push(mcs_queue(T) & this, T * elem) __attribute__((artificial));
    26                 T * push(mcs_queue(T) & this, T * elem) {
    27                         /* paranoid */ verify(!(elem`next));
     24                T * push( mcs_queue(T) & this, T * elem ) __attribute__((artificial));
     25                T * push( mcs_queue(T) & this, T * elem ) {
     26                        /* paranoid */ verify( ! next( elem ) );
    2827                        // Race to add to the tail
    29                         T * prev = __atomic_exchange_n(&this.tail, elem, __ATOMIC_SEQ_CST);
     28                        T * prev_val = __atomic_exchange_n(&this.tail, elem, __ATOMIC_SEQ_CST);
    3029                        // If we aren't the first, we need to tell the person before us
    3130                        // No need to
    32                         if (prev) prev`next = elem;
    33                         return prev;
     31                        if ( prev_val ) next( prev_val ) = elem;
     32                        return prev_val;
    3433                }
    3534
     
    3736                // Passing an element that is not the head is undefined behavior
    3837                // NOT Multi-Thread Safe, concurrent pushes are safe
    39                 T * advance(mcs_queue(T) & this, T * elem) __attribute__((artificial));
    40                 T * advance(mcs_queue(T) & this, T * elem) {
     38                T * advance( mcs_queue(T) & this, T * elem ) __attribute__((artificial));
     39                T * advance( mcs_queue(T) & this, T * elem ) {
    4140                        T * expected = elem;
    4241                        // Check if this is already the last item
     
    4443
    4544                        // If not wait for next item to show-up, filled by push
    46                         while (!(elem`next)) Pause();
     45                        while ( ! next( elem ) ) Pause();
    4746
    4847                        // we need to return if the next link was empty
    49                         T * ret = elem`next;
     48                        T * ret = next( elem );
    5049
    5150                        // invalidate link to reset to initial state
    52                         elem`next = 0p;
     51                        next( elem ) = 0p;
    5352                        return ret;
    5453                }
     
    6564        };
    6665
    67         static inline void ?{}(mpsc_queue(T) & this) {
     66        static inline void ?{}( mpsc_queue(T) & this ) {
    6867                ((mcs_queue(T)&)this){};
    6968                this.head = 0p;
    7069        }
    7170
    72         static inline forall(| { T * volatile & ?`next ( T * ); })
    73         {
     71        static inline forall( | { T * volatile & next ( T * ); }) {
    7472                // Added a new element to the queue
    7573                // Multi-Thread Safe, Lock-Free
    76                 T * push(mpsc_queue(T) & this, T * elem) __attribute__((artificial));
    77                 T * push(mpsc_queue(T) & this, T * elem) {
    78                         T * prev = push((mcs_queue(T)&)this, elem);
    79                         if (!prev) this.head = elem;
    80                         return prev;
     74                T * push( mpsc_queue(T) & this, T * elem ) __attribute__((artificial));
     75                T * push( mpsc_queue(T) & this, T * elem ) {
     76                        T * prev_val = push( (mcs_queue(T)&)this, elem );
     77                        if ( ! prev_val ) this.head = elem;
     78                        return prev_val;
    8179                }
    8280
    8381                // Pop an element from the queue
    8482                // return the element that was removed
    85                 // next is set to the new head of the queue
     83                // head is set to the new head of the queue
    8684                // NOT Multi-Thread Safe
    87                 T * pop(mpsc_queue(T) & this, T *& next) __attribute__((artificial));
    88                 T * pop(mpsc_queue(T) & this, T *& next) {
     85                T * pop( mpsc_queue(T) & this, T *& head ) __attribute__((artificial));
     86                T * pop( mpsc_queue(T) & this, T *& head ) {
    8987                        T * elem = this.head;
    9088                        // If head is empty just return
    91                         if (!elem) return 0p;
     89                        if ( ! elem ) return 0p;
    9290
    9391                        // If there is already someone in the list, then it's easy
    94                         if (elem`next) {
    95                                 this.head = next = elem`next;
     92                        if ( next( elem ) ) {
     93                                this.head = head = next( elem );
    9694                                // force memory sync
    9795                                __atomic_thread_fence(__ATOMIC_SEQ_CST);
    9896
    9997                                // invalidate link to reset to initial state
    100                                 elem`next = 0p;
     98                                next( elem ) = 0p;
    10199                        }
    102100                        // Otherwise, there might be a race where it only looks but someone is enqueuing
     
    106104                                // after that point, it could overwrite the write in push
    107105                                this.head = 0p;
    108                                 next = advance((mcs_queue(T)&)this, elem);
     106                                head = advance( (mcs_queue(T)&)this, elem );
    109107
    110108                                // Only write to the head if there is a next element
    111109                                // it is the only way we can guarantee we are not overwriting
    112110                                // a write made in push
    113                                 if (next) this.head = next;
    114                         }
    115 
     111                                if ( head ) this.head = head;
     112                        }
    116113                        // return removed element
    117114                        return elem;
     
    119116
    120117                // Same as previous function
    121                 T * pop(mpsc_queue(T) & this) {
     118                T * pop( mpsc_queue(T) & this ) {
    122119                        T * _ = 0p;
    123120                        return pop(this, _);
     
    144141        static inline bool is_poisoned( const poison_list(T) & this ) { return 1p == this.head; }
    145142
    146         static inline forall(| { T * volatile & ?`next ( T * ); })
     143        static inline forall( | { T * volatile & next( T * ); })
    147144        {
    148145                // Adds an element to the list
    149146                // Multi-Thread Safe, Lock-Free
    150                 bool push(poison_list(T) & this, T * elem) __attribute__((artificial));
    151                 bool push(poison_list(T) & this, T * elem) {
    152                         /* paranoid */ verify(0p == (elem`next));
    153                         __atomic_store_n( &elem`next, (T*)1p, __ATOMIC_RELAXED );
     147                bool push( poison_list(T) & this, T * elem ) __attribute__((artificial));
     148                bool push( poison_list(T) & this, T * elem ) {
     149                        /* paranoid */ verify( 0p == next( elem ) );
     150                        __atomic_store_n( &next( elem ), (T *)1p, __ATOMIC_RELAXED );
    154151
    155152                        // read the head up-front
     
    164161
    165162                                        // We should never succeed the CAS if it's poisonned and the elem should be 1p.
    166                                         /* paranoid */ verify( expected  != 1p );
    167                                         /* paranoid */ verify( elem`next == 1p );
     163                                        /* paranoid */ verify( expected != 1p );
     164                                        /* paranoid */ verify( next( elem ) == 1p );
    168165
    169166                                        // If we aren't the first, we need to tell the person before us
    170167                                        // No need to
    171                                         elem`next = expected;
     168                                        next( elem ) = expected;
    172169                                        return true;
    173170                                }
     
    178175                // Passing an element that is not the head is undefined behavior
    179176                // NOT Multi-Thread Safe, concurrent pushes are safe
    180                 T * advance(T * elem) __attribute__((artificial));
    181                 T * advance(T * elem) {
     177                T * advance( T * elem ) __attribute__((artificial));
     178                T * advance( T * elem ) {
    182179                        T * ret;
    183180
    184181                        // Wait for next item to show-up, filled by push
    185                         while (1p == (ret = __atomic_load_n(&elem`next, __ATOMIC_RELAXED))) Pause();
     182                        while (1p == (ret = __atomic_load_n( &next( elem ), __ATOMIC_RELAXED ) ) ) Pause();
    186183
    187184                        return ret;
     
    189186
    190187                // Poison the queue, preveting new pushes and returning the head
    191                 T * poison(poison_list(T) & this) __attribute__((artificial));
    192                 T * poison(poison_list(T) & this) {
     188                T * poison( poison_list(T) & this ) __attribute__((artificial));
     189                T * poison( poison_list(T) & this ) {
    193190                        T * ret = __atomic_exchange_n( &this.head, (T*)1p, __ATOMIC_SEQ_CST );
    194191                        /* paranoid */ verifyf( ret != (T*)1p, "Poison list %p poisoned more than once!", &this );
     
    215212}; // Link
    216213
    217 forall( T /*| sized(T)*/ | { Link(T) * ?`next( T * ); } ) {
     214forall( T /*| sized(T)*/ | { Link(T) * next( T * ); } ) {
    218215        struct StackLF {
    219216                Link(T) stack;
     
    226223
    227224                void push( StackLF(T) & this, T & n ) with(this) {
    228                         *( &n )`next = stack;                                           // atomic assignment unnecessary, or use CAA
     225                        *next( &n ) = stack;                                            // atomic assignment unnecessary, or use CAA
    229226                        for () {                                                                        // busy wait
    230                                 if ( __atomic_compare_exchange_n( &stack.atom, &( &n )`next->atom, (Link(T))@{ (LinkData(T))@{ &n, ( &n )`next->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node
     227                                if ( __atomic_compare_exchange_n( &stack.atom, &next( &n )->atom, (Link(T))@{ (LinkData(T))@{ &n, next( &n )->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node
    231228                        } // for
    232229                } // push
     
    236233                        for () {                                                                        // busy wait
    237234                                if ( t.data.top == 0p ) return 0p;              // empty stack ?
    238                                 Link(T) * next = ( t.data.top )`next;
     235                                Link(T) * next = next( t.data.top );
    239236                                if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ (LinkData(T))@{ next->data.top, t.data.count } }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top; // attempt to update top node
    240237                        } // for
     
    246243                                // TODO: Avoiding some problems with double fields access.
    247244                                LinkData(T) * data = &link->data;
    248                                 T * next = (T *)&(*data).top;
    249                                 if ( next == node ) {
    250                                         data->top = ( node )`next->data.top;
     245                                T * ntop = (T *)&(*data).top;
     246                                if ( ntop == node ) {
     247                                        data->top = next( node )->data.top;
    251248                                        return true;
    252249                                }
    253                                 if ( next == 0p ) return false;
    254                                 link = ( next )`next;
     250                                if ( ntop == 0p ) return false;
     251                                link = next( ntop );
    255252                        }
    256253                }
  • libcfa/src/collections/vector2.hfa

    rf85de47 r65bd3c2  
    1010// Created On       : Thu Jun 23 22:00:00 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Mar 14 08:40:53 2023
    13 // Update Count     : 2
     12// Last Modified On : Wed Apr 23 14:39:51 2025
     13// Update Count     : 6
    1414//
    1515
     
    254254        }
    255255
    256         while ( vector_permit(T) & liveIter = this.live_iters_$`elems; liveIter`moveNext ) {
     256        while ( vector_permit(T) & liveIter = iter( this.live_iters_$ ); advance( liveIter ) ) {
    257257            liveIter.item_$ += (newItems - this.buffer_first_$);
    258258        }
     
    350350        *insertTarget = val;
    351351
    352         while ( vector_permit(T) & liveIter = col.live_iters_$`elems; liveIter`moveNext ) {
     352        while ( vector_permit(T) & liveIter = iter( col.live_iters_$ ); advance( liveIter ) ) {
    353353            if ( inRange_$(liveIter.item_$, insertTarget, col.elems_end_$) ) {
    354354                liveIter.item_$ += 1;
  • libcfa/src/concurrency/alarm.cfa

    rf85de47 r65bd3c2  
    1010// Created On       : Fri Jun 2 11:31:25 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Jun 17 16:11:35 2020
    13 // Update Count     : 75
     12// Last Modified On : Thu Apr 24 22:22:25 2025
     13// Update Count     : 88
    1414//
    1515
     
    8484
    8585void insert( alarm_list_t * this, alarm_node_t * n ) {
    86         alarm_node_t * it = & (*this)`first;
    87         while( it && (n->deadline > it->deadline) ) {
    88                 it = & (*it)`next;
    89         }
    90         if ( it ) {
    91                 insert_before( *it, *n );
    92         } else {
    93                 insert_last(*this, *n);
    94         }
    95 
     86        alarm_node_t & it = iter( *this );
     87        while ( advance( it ) && it.deadline <= n->deadline );
     88        insert_before( it, *n );
    9689        verify( validate( *this ) );
    9790}
     
    9992alarm_node_t * pop( alarm_list_t * this ) {
    10093        verify( validate( *this ) );
    101         alarm_node_t * head = & (*this)`first;
     94        alarm_node_t * head = &first( *this );
    10295        if( head ) {
    10396                remove(*head);
     
    147140        park();
    148141
    149         /* paranoid */ verify( !node.set );
    150         /* paranoid */ verify( & node`next == 0p );
    151         /* paranoid */ verify( & node`prev == 0p );
     142        /* paranoid */ verify( ! node.set );
     143        /* paranoid */ verify( & next( node ) == 0p );
     144        /* paranoid */ verify( & prev( node ) == 0p );
    152145}
    153146
  • libcfa/src/concurrency/barrier.hfa

    rf85de47 r65bd3c2  
    1111// Created On       : Sun Nov 10 08:07:35 2024
    1212// Last Modified By : Peter A. Buhr
    13 // Last Modified On : Wed Nov 13 12:37:04 2024
    14 // Update Count     : 9
     13// Last Modified On : Thu Apr 24 22:41:11 2025
     14// Update Count     : 12
    1515//
    1616
     
    3131
    3232// Returns a value indicating the reverse order the threads arrived, i.e. last thread returns 0 (and does not block)
    33 // last is an optional hook that is called by the Gth thread before unblocking the other threads.
    34 static inline unsigned int block( barrier & mutex b, fptr_t last = (fptr_t)0 ) with( b ) {
     33// hook is an optional hook that is called by the Gth thread before unblocking the other threads.
     34static inline unsigned int block( barrier & mutex b, fptr_t hook = (fptr_t)0 ) with( b ) {
    3535        arrivals -= 1;                                                                          // prefix decrement so last is 0 not 1
    3636        unsigned arrived = b.arrivals;                                          // note arrival order
     
    3838                wait( b.c );
    3939        } else {                                                                                        // group formed
    40                 if ( last ) last();                                                             // safe to call
     40                if ( hook ) hook();                                                             // safe to call
    4141                signal_all( c );                                                                // unblock group
    4242                arrivals = group;                                                               // reset
  • libcfa/src/concurrency/channel.hfa

    rf85de47 r65bd3c2  
    5757
    5858forall( T ) {
    59 
    60 struct __attribute__((aligned(128))) channel {
    61     size_t size, front, back, count;
    62     T * buffer;
    63     dlist( select_node ) prods, cons; // lists of blocked threads
    64     go_mutex mutex_lock;              // MX lock
    65     bool closed;                      // indicates channel close/open
    66     #ifdef CHAN_STATS
    67     size_t p_blocks, p_ops, c_blocks, c_ops;      // counts total ops and ops resulting in a blocked thd
    68     #endif
    69 };
    70 static inline void ?{}( channel(T) & this, channel(T) this2 ) = void;
    71 static inline void ?=?( channel(T) & this, channel(T) this2 ) = void;
    72 
    73 static inline void ?{}( channel(T) &c, size_t _size ) with(c) {
    74     size = _size;
    75     front = back = count = 0;
    76     if ( size != 0 ) buffer = aalloc( size );
    77     prods{};
    78     cons{};
    79     mutex_lock{};
    80     closed = false;
    81     #ifdef CHAN_STATS
    82     p_blocks = 0;
    83     p_ops = 0;
    84     c_blocks = 0;
    85     c_ops = 0;
    86     #endif
    87 }
    88 
    89 static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
    90 static inline void ^?{}( channel(T) &c ) with(c) {
    91     #ifdef CHAN_STATS
    92     printf("Channel %p Blocks: %lu,\t\tOperations: %lu,\t%.2f%% of ops blocked\n", &c, p_blocks + c_blocks, p_ops + c_ops, ((double)p_blocks + c_blocks)/(p_ops + c_ops) * 100);
    93     printf("Channel %p Consumer Blocks: %lu,\tConsumer Ops: %lu,\t%.2f%% of Consumer ops blocked\n", &c, p_blocks, p_ops, ((double)p_blocks)/p_ops * 100);
    94     printf("Channel %p Producer Blocks: %lu,\tProducer Ops: %lu,\t%.2f%% of Producer ops blocked\n", &c, c_blocks, c_ops, ((double)c_blocks)/c_ops * 100);
    95     #endif
    96     verifyf( __handle_waituntil_OR( cons ) || __handle_waituntil_OR( prods ) || cons`isEmpty && prods`isEmpty,
    97         "Attempted to delete channel with waiting threads (Deadlock).\n" );
    98     if ( size != 0 ) delete( buffer );
    99 }
    100 static inline size_t get_count( channel(T) & chan ) with(chan) { return __atomic_load_n( &count, __ATOMIC_RELAXED ); }
    101 static inline size_t get_size( channel(T) & chan ) with(chan) { return __atomic_load_n( &size, __ATOMIC_RELAXED ); }
    102 static inline bool has_waiters( channel(T) & chan ) with(chan) { return !cons`isEmpty || !prods`isEmpty; }
    103 static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !cons`isEmpty; }
    104 static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !prods`isEmpty; }
    105 
    106 // closes the channel and notifies all blocked threads
    107 static inline void close( channel(T) & chan ) with(chan) {
    108     lock( mutex_lock );
    109     closed = true;
    110 
    111     // flush waiting consumers and producers
    112     while ( has_waiting_consumers( chan ) ) {
    113         if( !__handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race
    114             break;  // if __handle_waituntil_OR returns false cons is empty so break
    115         cons`first.extra = 0p;
    116         wake_one( cons );
    117     }
    118     while ( has_waiting_producers( chan ) ) {
    119         if( !__handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race
    120             break;  // if __handle_waituntil_OR returns false prods is empty so break
    121         prods`first.extra = 0p;
    122         wake_one( prods );
    123     }
    124     unlock(mutex_lock);
    125 }
    126 
    127 static inline void is_closed( channel(T) & chan ) with(chan) { return closed; }
    128 
    129 // used to hand an element to a blocked consumer and signal it
    130 static inline void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
    131     memcpy( cons`first.extra, (void *)&elem, sizeof(T) ); // do waiting consumer work
    132     wake_one( cons );
    133 }
    134 
    135 // used to hand an element to a blocked producer and signal it
    136 static inline void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
    137     memcpy( (void *)&retval, prods`first.extra, sizeof(T) );
    138     wake_one( prods );
    139 }
    140 
    141 static inline void flush( channel(T) & chan, T elem ) with(chan) {
    142     lock( mutex_lock );
    143     while ( count == 0 && !cons`isEmpty ) {
    144         __cons_handoff( chan, elem );
    145     }
    146     unlock( mutex_lock );
    147 }
    148 
    149 // handles buffer insert
    150 static inline void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
    151     memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) );
    152     count += 1;
    153     back++;
    154     if ( back == size ) back = 0;
    155 }
    156 
    157 // needed to avoid an extra copy in closed case
    158 static inline bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
    159     lock( mutex_lock );
    160     #ifdef CHAN_STATS
    161     p_ops++;
    162     #endif
    163 
    164     ConsEmpty: if ( !cons`isEmpty ) {
    165         if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
    166         __cons_handoff( chan, elem );
    167         unlock( mutex_lock );
    168         return true;
    169     }
    170 
    171     if ( count == size ) { unlock( mutex_lock ); return false; }
    172 
    173     __buf_insert( chan, elem );
    174     unlock( mutex_lock );
    175     return true;
    176 }
    177 
    178 // attempts a nonblocking insert
    179 // returns true if insert was successful, false otherwise
    180 static inline bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); }
    181 
    182 // handles closed case of insert routine
    183 static inline void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
    184     channel_closed except{ &channel_closed_vt, &elem, &chan };
    185     throwResume except; // throw closed resumption
    186     if ( !__internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination
    187 }
    188 
    189 static inline void insert( channel(T) & chan, T elem ) with(chan) {
    190     // check for close before acquire mx
    191     if ( unlikely(closed) ) {
    192         __closed_insert( chan, elem );
    193         return;
    194     }
    195 
    196     lock( mutex_lock );
    197 
    198     #ifdef CHAN_STATS
    199     if ( !closed ) p_ops++;
    200     #endif
    201 
    202     // if closed handle
    203     if ( unlikely(closed) ) {
    204         unlock( mutex_lock );
    205         __closed_insert( chan, elem );
    206         return;
    207     }
    208 
    209     // buffer count must be zero if cons are blocked (also handles zero-size case)
    210     ConsEmpty: if ( !cons`isEmpty ) {
    211         if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
    212         __cons_handoff( chan, elem );
    213         unlock( mutex_lock );
    214         return;
    215     }
    216 
    217     // wait if buffer is full, work will be completed by someone else
    218     if ( count == size ) {
    219         #ifdef CHAN_STATS
    220         p_blocks++;
    221         #endif
    222 
    223         // check for if woken due to close
    224         if ( unlikely( block( prods, &elem, mutex_lock ) ) )
    225             __closed_insert( chan, elem );
    226         return;
    227     } // if
    228 
    229     __buf_insert( chan, elem );
    230     unlock( mutex_lock );
    231 }
    232 
    233 // does the buffer remove and potentially does waiting producer work
    234 static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
    235     memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) );
    236     count -= 1;
    237     front = (front + 1) % size;
    238     if (count == size - 1 && !prods`isEmpty ) {
    239         if ( !__handle_waituntil_OR( prods ) ) return;
    240         __buf_insert( chan, *(T *)prods`first.extra );  // do waiting producer work
    241         wake_one( prods );
    242     }
    243 }
    244 
    245 // needed to avoid an extra copy in closed case and single return val case
    246 static inline bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) {
    247     lock( mutex_lock );
    248     #ifdef CHAN_STATS
    249     c_ops++;
    250     #endif
    251 
    252     ZeroSize: if ( size == 0 && !prods`isEmpty ) {
    253         if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
    254         __prods_handoff( chan, retval );
    255         unlock( mutex_lock );
    256         return true;
    257     }
    258 
    259     if ( count == 0 ) { unlock( mutex_lock ); return false; }
    260 
    261     __do_remove( chan, retval );
    262     unlock( mutex_lock );
    263     return true;
    264 }
    265 
    266 // attempts a nonblocking remove
    267 // returns [T, true] if insert was successful
    268 // returns [T, false] if insert was successful (T uninit)
    269 static inline [T, bool] try_remove( channel(T) & chan ) {
    270     T retval;
    271     bool success = __internal_try_remove( chan, retval );
    272     return [ retval, success ];
    273 }
    274 
    275 static inline T try_remove( channel(T) & chan ) {
    276     T retval;
    277     __internal_try_remove( chan, retval );
    278     return retval;
    279 }
    280 
    281 // handles closed case of insert routine
    282 static inline void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
    283     channel_closed except{ &channel_closed_vt, 0p, &chan };
    284     throwResume except; // throw resumption
    285     if ( !__internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination
    286 }
    287 
    288 static inline T remove( channel(T) & chan ) with(chan) {
    289     T retval;
    290     if ( unlikely(closed) ) {
    291         __closed_remove( chan, retval );
    292         return retval;
    293     }
    294     lock( mutex_lock );
    295 
    296     #ifdef CHAN_STATS
    297     if ( !closed ) c_ops++;
    298     #endif
    299 
    300     if ( unlikely(closed) ) {
    301         unlock( mutex_lock );
    302         __closed_remove( chan, retval );
    303         return retval;
    304     }
    305 
    306     // have to check for the zero size channel case
    307     ZeroSize: if ( size == 0 && !prods`isEmpty ) {
    308         if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
    309         __prods_handoff( chan, retval );
    310         unlock( mutex_lock );
    311         return retval;
    312     }
    313 
    314     // wait if buffer is empty, work will be completed by someone else
    315     if ( count == 0 ) {
    316         #ifdef CHAN_STATS
    317         c_blocks++;
    318         #endif
    319         // check for if woken due to close
    320         if ( unlikely( block( cons, &retval, mutex_lock ) ) )
    321             __closed_remove( chan, retval );
    322         return retval;
    323     }
    324 
    325     // Remove from buffer
    326     __do_remove( chan, retval );
    327     unlock( mutex_lock );
    328     return retval;
    329 }
    330 static inline void remove( channel(T) & chan ) { T elem = (T)remove( chan ); }
    331 
    332 
    333 ///////////////////////////////////////////////////////////////////////////////////////////
    334 // The following is Go-style operator support for channels
    335 ///////////////////////////////////////////////////////////////////////////////////////////
    336 
    337 static inline void ?<<?( channel(T) & chan, T elem ) { insert( chan, elem ); }
    338 static inline void ?<<?( T & ret, channel(T) & chan ) { ret = remove( chan ); }
    339 
    340 ///////////////////////////////////////////////////////////////////////////////////////////
    341 // The following is support for waituntil (select) statements
    342 ///////////////////////////////////////////////////////////////////////////////////////////
    343 static inline bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) {
    344     if ( !node`isListed && !node.park_counter ) return false; // handle special OR case
    345     lock( mutex_lock );
    346     if ( node`isListed ) { // op wasn't performed
    347         remove( node );
    348         unlock( mutex_lock );
    349         return false;
    350     }
    351     unlock( mutex_lock );
    352 
    353     // only return true when not special OR case and status is SAT
    354     return !node.park_counter ? false : *node.clause_status == __SELECT_SAT;
    355 }
    356 
    357 // special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case
    358 static inline bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) {
    359     while ( !queue`isEmpty ) {
    360         // if node not a special OR case or if we win the special OR case race break
    361         if ( !queue`first.clause_status || queue`first.park_counter || __pending_set_other( queue`first, mine, ((unsigned long int)(&(queue`first))) ) )
    362             return true;
     59         struct __attribute__((aligned(128))) channel {
     60                size_t size, front, back, count;
     61                T * buffer;
     62                dlist( select_node ) prods, cons;                               // lists of blocked threads
     63                go_mutex mutex_lock;                                                    // MX lock
     64                bool closed;                                                                    // indicates channel close/open
     65                #ifdef CHAN_STATS
     66                size_t p_blocks, p_ops, c_blocks, c_ops;                // counts total ops and ops resulting in a blocked thd
     67            #endif
     68        };
     69
     70         // type used by select statement to capture a chan read as the selected operation
     71         struct chan_read {
     72                 T * ret;
     73                 channel(T) * chan;
     74         };
     75         __CFA_SELECT_GET_TYPE( chan_read(T) );
     76
     77         // type used by select statement to capture a chan read as the selected operation that doesn't have a param to read to
     78         struct chan_read_no_ret {
     79                 T retval;
     80                 chan_read( T ) c_read;
     81         };
     82         __CFA_SELECT_GET_TYPE( chan_read_no_ret(T) );
     83
     84         // type used by select statement to capture a chan write as the selected operation
     85         struct chan_write {
     86                 T elem;
     87                 channel(T) * chan;
     88         };
     89         __CFA_SELECT_GET_TYPE( chan_write(T) );
     90} // distribution
     91
     92static inline forall( T ) {
     93        void ?{}( channel(T) & this, channel(T) this2 ) = void;
     94        void ?=?( channel(T) & this, channel(T) this2 ) = void;
     95
     96        void ?{}( channel(T) &c, size_t _size ) with(c) {
     97                size = _size;
     98                front = back = count = 0;
     99                if ( size != 0 ) buffer = aalloc( size );
     100                prods{};
     101                cons{};
     102                mutex_lock{};
     103                closed = false;
     104            #ifdef CHAN_STATS
     105                p_blocks = 0;
     106                p_ops = 0;
     107                c_blocks = 0;
     108                c_ops = 0;
     109            #endif
     110        }
     111
     112        void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; }
     113        void ^?{}( channel(T) &c ) with(c) {
     114            #ifdef CHAN_STATS
     115                printf("Channel %p Blocks: %lu,\t\tOperations: %lu,\t%.2f%% of ops blocked\n", &c, p_blocks + c_blocks, p_ops + c_ops, ((double)p_blocks + c_blocks)/(p_ops + c_ops) * 100);
     116                printf("Channel %p Consumer Blocks: %lu,\tConsumer Ops: %lu,\t%.2f%% of Consumer ops blocked\n", &c, p_blocks, p_ops, ((double)p_blocks)/p_ops * 100);
     117                printf("Channel %p Producer Blocks: %lu,\tProducer Ops: %lu,\t%.2f%% of Producer ops blocked\n", &c, c_blocks, c_ops, ((double)c_blocks)/c_ops * 100);
     118            #endif
     119                verifyf( __handle_waituntil_OR( cons ) || __handle_waituntil_OR( prods ) || isEmpty( cons ) && isEmpty( prods ),
     120                                 "Attempted to delete channel with waiting threads (Deadlock).\n" );
     121                if ( size != 0 ) delete( buffer );
     122        }
     123        size_t get_count( channel(T) & chan ) with(chan) { return __atomic_load_n( &count, __ATOMIC_RELAXED ); }
     124        size_t get_size( channel(T) & chan ) with(chan) { return __atomic_load_n( &size, __ATOMIC_RELAXED ); }
     125        bool has_waiters( channel(T) & chan ) with(chan) { return ! isEmpty( cons ) || ! isEmpty( prods ); }
     126        bool has_waiting_consumers( channel(T) & chan ) with(chan) { return ! isEmpty( cons ); }
     127        bool has_waiting_producers( channel(T) & chan ) with(chan) { return ! isEmpty( prods ); }
     128
     129        // closes the channel and notifies all blocked threads
     130        void close( channel(T) & chan ) with(chan) {
     131                lock( mutex_lock );
     132                closed = true;
     133
     134                // flush waiting consumers and producers
     135                while ( has_waiting_consumers( chan ) ) {
     136                        if( ! __handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race
     137                                break;  // if __handle_waituntil_OR returns false cons is empty so break
     138                        first( cons ).extra = 0p;
     139                        wake_one( cons );
     140                }
     141                while ( has_waiting_producers( chan ) ) {
     142                        if( ! __handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race
     143                                break;  // if __handle_waituntil_OR returns false prods is empty so break
     144                        first( prods ).extra = 0p;
     145                        wake_one( prods );
     146                }
     147                unlock(mutex_lock);
     148        }
     149
     150        void is_closed( channel(T) & chan ) with(chan) { return closed; }
     151
     152        // used to hand an element to a blocked consumer and signal it
     153        void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
     154                memcpy( first( cons ).extra, (void *)&elem, sizeof(T) ); // do waiting consumer work
     155                wake_one( cons );
     156        }
     157
     158        // used to hand an element to a blocked producer and signal it
     159        void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
     160                memcpy( (void *)&retval, first( prods ).extra, sizeof(T) );
     161                wake_one( prods );
     162        }
     163
     164        void flush( channel(T) & chan, T elem ) with(chan) {
     165                lock( mutex_lock );
     166                while ( count == 0 && ! isEmpty( cons ) ) {
     167                        __cons_handoff( chan, elem );
     168                }
     169                unlock( mutex_lock );
     170        }
     171
     172        // handles buffer insert
     173        void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
     174                memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) );
     175                count += 1;
     176                back++;
     177                if ( back == size ) back = 0;
     178        }
     179
     180        // needed to avoid an extra copy in closed case
     181        bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
     182                lock( mutex_lock );
     183            #ifdef CHAN_STATS
     184                p_ops++;
     185            #endif
     186
     187          ConsEmpty:
     188                if ( ! isEmpty( cons ) ) {
     189                        if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty;
     190                        __cons_handoff( chan, elem );
     191                        unlock( mutex_lock );
     192                        return true;
     193                }
     194
     195                if ( count == size ) { unlock( mutex_lock ); return false; }
     196
     197                __buf_insert( chan, elem );
     198                unlock( mutex_lock );
     199                return true;
     200        }
     201
     202        // attempts a nonblocking insert
     203        // returns true if insert was successful, false otherwise
     204        bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); }
     205
     206        // handles closed case of insert routine
     207        void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
     208                channel_closed except{ &channel_closed_vt, &elem, &chan };
     209                throwResume except; // throw closed resumption
     210                if ( ! __internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination
     211        }
     212
     213        void insert( channel(T) & chan, T elem ) with(chan) {
     214                // check for close before acquire mx
     215                if ( unlikely(closed) ) {
     216                        __closed_insert( chan, elem );
     217                        return;
     218                }
     219
     220                lock( mutex_lock );
     221
     222            #ifdef CHAN_STATS
     223                if ( ! closed ) p_ops++;
     224            #endif
     225
     226                // if closed handle
     227                if ( unlikely(closed) ) {
     228                        unlock( mutex_lock );
     229                        __closed_insert( chan, elem );
     230                        return;
     231                }
     232
     233                // buffer count must be zero if cons are blocked (also handles zero-size case)
     234          ConsEmpty:
     235                if ( ! isEmpty( cons ) ) {
     236                        if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty;
     237                        __cons_handoff( chan, elem );
     238                        unlock( mutex_lock );
     239                        return;
     240                }
     241
     242                // wait if buffer is full, work will be completed by someone else
     243                if ( count == size ) {
     244                #ifdef CHAN_STATS
     245                        p_blocks++;
     246                #endif
     247
     248                        // check for if woken due to close
     249                        if ( unlikely( block( prods, &elem, mutex_lock ) ) )
     250                                __closed_insert( chan, elem );
     251                        return;
     252                } // if
     253
     254                __buf_insert( chan, elem );
     255                unlock( mutex_lock );
     256        }
     257
     258        // does the buffer remove and potentially does waiting producer work
     259        void __do_remove( channel(T) & chan, T & retval ) with(chan) {
     260                memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) );
     261                count -= 1;
     262                front = (front + 1) % size;
     263                if (count == size - 1 && ! isEmpty( prods ) ) {
     264                        if ( ! __handle_waituntil_OR( prods ) ) return;
     265                        __buf_insert( chan, *(T *)first( prods ).extra );  // do waiting producer work
     266                        wake_one( prods );
     267                }
     268        }
     269
     270        // needed to avoid an extra copy in closed case and single return val case
     271        bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) {
     272                lock( mutex_lock );
     273            #ifdef CHAN_STATS
     274                c_ops++;
     275            #endif
     276
     277          ZeroSize:
     278                if ( size == 0 && ! isEmpty( prods ) ) {
     279                        if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize;
     280                        __prods_handoff( chan, retval );
     281                        unlock( mutex_lock );
     282                        return true;
     283                }
     284
     285                if ( count == 0 ) { unlock( mutex_lock ); return false; }
     286
     287                __do_remove( chan, retval );
     288                unlock( mutex_lock );
     289                return true;
     290        }
     291
     292        // attempts a nonblocking remove
     293        // returns [T, true] if insert was successful
     294        // returns [T, false] if insert was successful (T uninit)
     295        [T, bool] try_remove( channel(T) & chan ) {
     296                T retval;
     297                bool success = __internal_try_remove( chan, retval );
     298                return [ retval, success ];
     299        }
     300
     301        T try_remove( channel(T) & chan ) {
     302                T retval;
     303                __internal_try_remove( chan, retval );
     304                return retval;
     305        }
     306
     307        // handles closed case of insert routine
     308        void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
     309                channel_closed except{ &channel_closed_vt, 0p, &chan };
     310                throwResume except; // throw resumption
     311                if ( ! __internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination
     312        }
     313
     314        T remove( channel(T) & chan ) with(chan) {
     315                T retval;
     316                if ( unlikely(closed) ) {
     317                        __closed_remove( chan, retval );
     318                        return retval;
     319                }
     320                lock( mutex_lock );
     321
     322            #ifdef CHAN_STATS
     323                if ( ! closed ) c_ops++;
     324                #endif
     325
     326                if ( unlikely(closed) ) {
     327                        unlock( mutex_lock );
     328                        __closed_remove( chan, retval );
     329                        return retval;
     330                }
     331
     332                // have to check for the zero size channel case
     333          ZeroSize:
     334                if ( size == 0 && ! isEmpty( prods ) ) {
     335                        if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize;
     336                        __prods_handoff( chan, retval );
     337                        unlock( mutex_lock );
     338                        return retval;
     339                }
     340
     341                // wait if buffer is empty, work will be completed by someone else
     342                if ( count == 0 ) {
     343                #ifdef CHAN_STATS
     344                        c_blocks++;
     345                #endif
     346                        // check for if woken due to close
     347                        if ( unlikely( block( cons, &retval, mutex_lock ) ) )
     348                                __closed_remove( chan, retval );
     349                        return retval;
     350                }
     351
     352                // Remove from buffer
     353                __do_remove( chan, retval );
     354                unlock( mutex_lock );
     355                return retval;
     356        }
     357        void remove( channel(T) & chan ) { T elem = (T)remove( chan ); }
     358
     359
     360        ///////////////////////////////////////////////////////////////////////////////////////////
     361        // The following is Go-style operator support for channels
     362        ///////////////////////////////////////////////////////////////////////////////////////////
     363
     364        void ?<<?( channel(T) & chan, T elem ) { insert( chan, elem ); }
     365        void ?<<?( T & ret, channel(T) & chan ) { ret = remove( chan ); }
     366
     367        ///////////////////////////////////////////////////////////////////////////////////////////
     368        // The following is support for waituntil (select) statements
     369        ///////////////////////////////////////////////////////////////////////////////////////////
     370        bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) {
     371            if ( ! isListed( node ) && ! node.park_counter ) return false; // handle special OR case
     372            lock( mutex_lock );
     373            if ( isListed( node ) ) { // op wasn't performed
     374                remove( node );
     375                unlock( mutex_lock );
     376                return false;
     377            }
     378            unlock( mutex_lock );
     379
     380            // only return true when not special OR case and status is SAT
     381            return ! node.park_counter ? false : *node.clause_status == __SELECT_SAT;
     382        }
     383
     384        // special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case
     385        bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) {
     386            while ( ! isEmpty( queue ) ) {
     387                // if node not a special OR case or if we win the special OR case race break
     388                if ( ! first( queue ).clause_status || first( queue ).park_counter || __pending_set_other( first( queue ), mine, ((unsigned long int)(&(first( queue )))) ) )
     389                    return true;
    363390       
    364         // our node lost the race when toggling in __pending_set_other
    365         if ( *mine.clause_status != __SELECT_PENDING )
    366             return false;
    367 
    368         // otherwise we lost the special OR race so discard node
    369         try_pop_front( queue );
    370     }
    371     return false;
    372 }
    373 
    374 // type used by select statement to capture a chan read as the selected operation
    375 struct chan_read {
    376     T * ret;
    377     channel(T) * chan;
    378 };
    379 __CFA_SELECT_GET_TYPE( chan_read(T) );
    380 
    381 static inline void ?{}( chan_read(T) & cr, channel(T) * chan, T * ret ) {
    382     cr.chan = chan;
    383     cr.ret = ret;
    384 }
    385 static inline chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ &chan, &ret }; return cr; }
    386 
    387 static inline void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
    388     __closed_remove( *chan, *ret );
    389     // if we get here then the insert succeeded
    390     __make_select_node_available( node );
    391 }
    392 
    393 static inline bool register_select( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
    394     lock( mutex_lock );
    395     node.extra = ret; // set .extra so that if it == 0p later in on_selected it is due to channel close
    396 
    397     #ifdef CHAN_STATS
    398     if ( !closed ) c_ops++;
    399     #endif
    400 
    401     if ( !node.park_counter ) {
    402         // are we special case OR and front of cons is also special case OR
    403         if ( !unlikely(closed) && !prods`isEmpty && prods`first.clause_status && !prods`first.park_counter ) {
    404             if ( !__make_select_node_pending( node ) ) {
    405                 unlock( mutex_lock );
    406                 return false;
    407             }
    408 
    409             if ( __handle_pending( prods, node ) ) {
    410                 __prods_handoff( *chan, *ret );
    411                 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
    412                 unlock( mutex_lock );
    413                 return true;
    414             }
    415             if ( *node.clause_status == __SELECT_PENDING )
    416                 __make_select_node_unsat( node );
    417         }
    418         // check if we can complete operation. If so race to establish winner in special OR case
    419         if ( count != 0 || !prods`isEmpty || unlikely(closed) ) {
    420             if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
    421                 unlock( mutex_lock );
    422                 return false;
    423             }
    424         }
    425     }
    426 
    427     if ( unlikely(closed) ) {
    428         unlock( mutex_lock );
    429         __handle_select_closed_read( this, node );
    430         return true;
    431     }
    432 
    433     // have to check for the zero size channel case
    434     ZeroSize: if ( size == 0 && !prods`isEmpty ) {
    435         if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
    436         __prods_handoff( *chan, *ret );
    437         __set_avail_then_unlock( node, mutex_lock );
    438         return true;
    439     }
    440 
    441     // wait if buffer is empty, work will be completed by someone else
    442     if ( count == 0 ) {
    443         #ifdef CHAN_STATS
    444         c_blocks++;
    445         #endif
     391                // our node lost the race when toggling in __pending_set_other
     392                if ( *mine.clause_status != __SELECT_PENDING )
     393                    return false;
     394
     395                // otherwise we lost the special OR race so discard node
     396                remove_first( queue );
     397            }
     398            return false;
     399        }
     400
     401        void ?{}( chan_read(T) & cr, channel(T) * chan, T * ret ) {
     402            cr.chan = chan;
     403            cr.ret = ret;
     404        }
     405        chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ &chan, &ret }; return cr; }
     406
     407                void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
     408            __closed_remove( *chan, *ret );
     409            // if we get here then the insert succeeded
     410            __make_select_node_available( node );
     411        }
     412
     413        bool register_select( chan_read(T) & this, select_node & node ) with(*this.chan, this) {
     414            lock( mutex_lock );
     415            node.extra = ret; // set .extra so that if it == 0p later in on_selected it is due to channel close
     416
     417            #ifdef CHAN_STATS
     418            if ( ! closed ) c_ops++;
     419            #endif
     420
     421            if ( ! node.park_counter ) {
     422                // are we special case OR and front of cons is also special case OR
     423                if ( ! unlikely(closed) && ! isEmpty( prods ) && first( prods ).clause_status && ! first( prods ).park_counter ) {
     424                    if ( ! __make_select_node_pending( node ) ) {
     425                        unlock( mutex_lock );
     426                        return false;
     427                    }
     428
     429                            if ( __handle_pending( prods, node ) ) {
     430                        __prods_handoff( *chan, *ret );
     431                        __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
     432                        unlock( mutex_lock );
     433                        return true;
     434                    }
     435                    if ( *node.clause_status == __SELECT_PENDING )
     436                        __make_select_node_unsat( node );
     437                }
     438                // check if we can complete operation. If so race to establish winner in special OR case
     439                if ( count != 0 || ! isEmpty( prods ) || unlikely(closed) ) {
     440                    if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     441                        unlock( mutex_lock );
     442                        return false;
     443                    }
     444                }
     445            }
     446
     447            if ( unlikely(closed) ) {
     448                unlock( mutex_lock );
     449                __handle_select_closed_read( this, node );
     450                return true;
     451            }
     452
     453            // have to check for the zero size channel case
     454            ZeroSize:
     455                if ( size == 0 && ! isEmpty( prods ) ) {
     456                        if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize;
     457                        __prods_handoff( *chan, *ret );
     458                        __set_avail_then_unlock( node, mutex_lock );
     459                        return true;
     460                }
     461
     462                // wait if buffer is empty, work will be completed by someone else
     463                if ( count == 0 ) {
     464                #ifdef CHAN_STATS
     465                c_blocks++;
     466                #endif
    446467       
    447         insert_last( cons, node );
    448         unlock( mutex_lock );
    449         return false;
    450     }
    451 
    452     // Remove from buffer
    453     __do_remove( *chan, *ret );
    454     __set_avail_then_unlock( node, mutex_lock );
    455     return true;
    456 }
    457 static inline bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
    458 static inline bool on_selected( chan_read(T) & this, select_node & node ) with(this) {
    459     if ( unlikely(node.extra == 0p) ) {
    460         if ( !exception_in_flight() ) __closed_remove( *chan, *ret ); // check if woken up due to closed channel
    461         else return false;
    462     }
    463     // This is only reachable if not closed or closed exception was handled
    464     return true;
    465 }
    466 
    467 // type used by select statement to capture a chan read as the selected operation that doesn't have a param to read to
    468 struct chan_read_no_ret {
    469     T retval;
    470     chan_read( T ) c_read;
    471 };
    472 __CFA_SELECT_GET_TYPE( chan_read_no_ret(T) );
    473 
    474 static inline void ?{}( chan_read_no_ret(T) & this, channel(T) & chan ) {
    475     this.c_read{ &chan, &this.retval };
    476 }
    477 
    478 static inline chan_read_no_ret(T) remove( channel(T) & chan ) { chan_read_no_ret(T) c_read{ chan }; return c_read; }
    479 static inline bool register_select( chan_read_no_ret(T) & this, select_node & node ) {
    480     this.c_read.ret = &this.retval;
    481     return register_select( this.c_read, node );
    482 }
    483 static inline bool unregister_select( chan_read_no_ret(T) & this, select_node & node ) { return unregister_select( this.c_read, node ); }
    484 static inline bool on_selected( chan_read_no_ret(T) & this, select_node & node ) { return on_selected( this.c_read, node ); }
    485 
    486 // type used by select statement to capture a chan write as the selected operation
    487 struct chan_write {
    488     T elem;
    489     channel(T) * chan;
    490 };
    491 __CFA_SELECT_GET_TYPE( chan_write(T) );
    492 
    493 static inline void ?{}( chan_write(T) & cw, channel(T) * chan, T elem ) {
    494     cw.chan = chan;
    495     memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) );
    496 }
    497 static inline chan_write(T) ?<<?( channel(T) & chan, T elem ) { chan_write(T) cw{ &chan, elem }; return cw; }
    498 static inline chan_write(T) insert( T elem, channel(T) & chan) { chan_write(T) cw{ &chan, elem }; return cw; }
    499 
    500 static inline void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
    501     __closed_insert( *chan, elem );
    502     // if we get here then the insert succeeded
    503     __make_select_node_available( node );
    504 }
    505 
    506 static inline bool register_select( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
    507     lock( mutex_lock );
    508     node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close
    509 
    510     #ifdef CHAN_STATS
    511     if ( !closed ) p_ops++;
    512     #endif
    513 
    514     // special OR case handling
    515     if ( !node.park_counter ) {
    516         // are we special case OR and front of cons is also special case OR
    517         if ( !unlikely(closed) && !cons`isEmpty && cons`first.clause_status && !cons`first.park_counter ) {
    518             if ( !__make_select_node_pending( node ) ) {
    519                 unlock( mutex_lock );
    520                 return false;
    521             }
    522 
    523             if ( __handle_pending( cons, node ) ) {
    524                 __cons_handoff( *chan, elem );
    525                 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
    526                 unlock( mutex_lock );
    527                 return true;
    528             }
    529             if ( *node.clause_status == __SELECT_PENDING )
    530                 __make_select_node_unsat( node );
    531         }
    532         // check if we can complete operation. If so race to establish winner in special OR case
    533         if ( count != size || !cons`isEmpty || unlikely(closed) ) {
    534             if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
    535                 unlock( mutex_lock );
    536                 return false;
    537             }
    538         }
    539     }
    540 
    541     // if closed handle
    542     if ( unlikely(closed) ) {
    543         unlock( mutex_lock );
    544         __handle_select_closed_write( this, node );
    545         return true;
    546     }
    547 
    548     // handle blocked consumer case via handoff (buffer is implicitly empty)
    549     ConsEmpty: if ( !cons`isEmpty ) {
    550         if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
    551         __cons_handoff( *chan, elem );
    552         __set_avail_then_unlock( node, mutex_lock );
    553         return true;
    554     }
    555 
    556     // insert node in list if buffer is full, work will be completed by someone else
    557     if ( count == size ) {
    558         #ifdef CHAN_STATS
    559         p_blocks++;
    560         #endif
    561 
    562         insert_last( prods, node );
    563         unlock( mutex_lock );
    564         return false;
    565     } // if
    566 
    567     // otherwise carry out write either via normal insert
    568     __buf_insert( *chan, elem );
    569     __set_avail_then_unlock( node, mutex_lock );
    570     return true;
    571 }
    572 static inline bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
    573 
    574 static inline bool on_selected( chan_write(T) & this, select_node & node ) with(this) {
    575     if ( unlikely(node.extra == 0p) ) {
    576         if ( !exception_in_flight() ) __closed_insert( *chan, elem ); // check if woken up due to closed channel
    577         else return false;
    578     }
    579     // This is only reachable if not closed or closed exception was handled
    580     return true;
    581 }
    582 
    583 } // forall( T )
    584 
    585 
     468                insert_last( cons, node );
     469                unlock( mutex_lock );
     470                return false;
     471            }
     472
     473            // Remove from buffer
     474            __do_remove( *chan, *ret );
     475            __set_avail_then_unlock( node, mutex_lock );
     476            return true;
     477        }
     478        bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
     479        bool on_selected( chan_read(T) & this, select_node & node ) with(this) {
     480            if ( unlikely(node.extra == 0p) ) {
     481                if ( ! exception_in_flight() ) __closed_remove( *chan, *ret ); // check if woken up due to closed channel
     482                else return false;
     483            }
     484            // This is only reachable if not closed or closed exception was handled
     485            return true;
     486        }
     487
     488        void ?{}( chan_read_no_ret(T) & this, channel(T) & chan ) {
     489            this.c_read{ &chan, &this.retval };
     490        }
     491
     492        chan_read_no_ret(T) remove( channel(T) & chan ) { chan_read_no_ret(T) c_read{ chan }; return c_read; }
     493        bool register_select( chan_read_no_ret(T) & this, select_node & node ) {
     494            this.c_read.ret = &this.retval;
     495            return register_select( this.c_read, node );
     496        }
     497        bool unregister_select( chan_read_no_ret(T) & this, select_node & node ) { return unregister_select( this.c_read, node ); }
     498        bool on_selected( chan_read_no_ret(T) & this, select_node & node ) { return on_selected( this.c_read, node ); }
     499
     500        void ?{}( chan_write(T) & cw, channel(T) * chan, T elem ) {
     501            cw.chan = chan;
     502            memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) );
     503        }
     504        chan_write(T) ?<<?( channel(T) & chan, T elem ) { chan_write(T) cw{ &chan, elem }; return cw; }
     505        chan_write(T) insert( T elem, channel(T) & chan) { chan_write(T) cw{ &chan, elem }; return cw; }
     506
     507        void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
     508            __closed_insert( *chan, elem );
     509            // if we get here then the insert succeeded
     510            __make_select_node_available( node );
     511        }
     512
     513        bool register_select( chan_write(T) & this, select_node & node ) with(*this.chan, this) {
     514            lock( mutex_lock );
     515            node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close
     516       
     517            #ifdef CHAN_STATS
     518            if ( ! closed ) p_ops++;
     519            #endif
     520
     521            // special OR case handling
     522            if ( ! node.park_counter ) {
     523                // are we special case OR and front of cons is also special case OR
     524                if ( ! unlikely(closed) && ! isEmpty( cons ) && first( cons ).clause_status && ! first( cons ).park_counter ) {
     525                    if ( ! __make_select_node_pending( node ) ) {
     526                        unlock( mutex_lock );
     527                        return false;
     528                    }
     529                    if ( __handle_pending( cons, node ) ) {
     530                                        __cons_handoff( *chan, elem );
     531                                        __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node
     532                                        unlock( mutex_lock );
     533                                        return true;
     534                                }
     535                                if ( *node.clause_status == __SELECT_PENDING )
     536                                        __make_select_node_unsat( node );
     537                        }
     538                        // check if we can complete operation. If so race to establish winner in special OR case
     539                        if ( count != size || ! isEmpty( cons ) || unlikely(closed) ) {
     540                                if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     541                                        unlock( mutex_lock );
     542                                        return false;
     543                                }
     544                        }
     545                }
     546
     547                // if closed handle
     548                if ( unlikely(closed) ) {
     549                        unlock( mutex_lock );
     550                        __handle_select_closed_write( this, node );
     551                        return true;
     552                }
     553
     554                // handle blocked consumer case via handoff (buffer is implicitly empty)
     555    ConsEmpty:
     556                if ( ! isEmpty( cons ) ) {
     557                        if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty;
     558                        __cons_handoff( *chan, elem );
     559                        __set_avail_then_unlock( node, mutex_lock );
     560                        return true;
     561                }
     562
     563                // insert node in list if buffer is full, work will be completed by someone else
     564                if ( count == size ) {
     565                #ifdef CHAN_STATS
     566                        p_blocks++;
     567                #endif
     568
     569                        insert_last( prods, node );
     570                        unlock( mutex_lock );
     571                        return false;
     572                } // if
     573
     574                // otherwise carry out write either via normal insert
     575                __buf_insert( *chan, elem );
     576                __set_avail_then_unlock( node, mutex_lock );
     577                return true;
     578        }
     579        bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); }
     580
     581        bool on_selected( chan_write(T) & this, select_node & node ) with(this) {
     582                if ( unlikely(node.extra == 0p) ) {
     583                        if ( ! exception_in_flight() ) __closed_insert( *chan, elem ); // check if woken up due to closed channel
     584                        else return false;
     585                }
     586                // This is only reachable if not closed or closed exception was handled
     587                return true;
     588        }
     589} // distribution
     590
     591
  • libcfa/src/concurrency/cofor.hfa

    rf85de47 r65bd3c2  
    3333
    3434void main( cofor_runner & this ) with(this) {
    35     while ( !done || !items`isEmpty ) {
     35    while ( ! done || ! isEmpty( items ) ) {
    3636                lock( mutex_lock );
    37         runner_node * node = &try_pop_front( items );
     37        runner_node * node = &remove_first( items );
    3838                unlock( mutex_lock );
    39                 if ( !node )
     39                if ( ! node )
    4040                        continue;
    4141        func( node->value );
  • libcfa/src/concurrency/coroutine.cfa

    rf85de47 r65bd3c2  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Sep 18 21:47:12 2023
    13 // Update Count     : 25
     12// Last Modified On : Fri Apr 25 06:48:19 2025
     13// Update Count     : 31
    1414//
    1515
     
    8282// helper for popping from coroutine's ehm buffer
    8383static nonlocal_exception * pop_ehm_head( coroutine$ * this ) {
    84     lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
    85     nonlocal_exception * nl_ex = pop_head( this->ehm_state.ehm_buffer );
    86     unlock( this->ehm_state.buffer_lock );
    87     return nl_ex;
     84        lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
     85        nonlocal_exception * nl_ex = pop_head( this->ehm_state.ehm_buffer );
     86        unlock( this->ehm_state.buffer_lock );
     87        return nl_ex;
    8888}
    8989
     
    9797
    9898void __stack_prepare( __stack_info_t * this, size_t create_size );
    99 static void __stack_clean  ( __stack_info_t * this );
     99static void __stack_clean( __stack_info_t * this );
    100100
    101101//-----------------------------------------------------------------------------
     
    105105
    106106        // Did we get a piece of storage ?
    107         if (this.storage || storageSize != 0) {
     107        if ( this.storage || storageSize != 0 ) {
    108108                // We either got a piece of storage or the user asked for a specific size
    109109                // Immediately create the stack
     
    128128        state = Start;
    129129        starter = 0p;
    130         last = 0p;
     130        this.last = 0p;
    131131        cancellation = 0p;
    132     ehm_state.ehm_buffer{};
    133     ehm_state.buffer_lock{};
    134     ehm_state.ehm_enabled = false;
    135 }
    136 
    137 void ^?{}(coroutine$& this) libcfa_public {
    138     // handle any leftover pending non-local exceptions
    139     nonlocal_exception * nl_ex = pop_ehm_head( &this );
    140     unsigned unhandled_ex = 0;
    141    
    142     // if any leftover exceptions handle
    143     while ( nl_ex != 0p ){
    144         unhandled_ex++;
    145         free( nl_ex->the_exception );
    146         free( nl_ex );
    147         nl_ex = pop_ehm_head( &this );
    148     }
    149 
    150     #ifdef __CFA_DEBUG__
    151     if ( unhandled_ex > 0 )
    152         printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex );
    153     #endif
    154 
    155         if(this.state != Halted && this.state != Start && this.state != Primed) {
     132        ehm_state.ehm_buffer{};
     133        ehm_state.buffer_lock{};
     134        ehm_state.ehm_enabled = false;
     135}
     136
     137void ^?{}( coroutine$ & this ) libcfa_public {
     138        // handle any leftover pending non-local exceptions
     139        nonlocal_exception * nl_ex = pop_ehm_head( &this );
     140        unsigned unhandled_ex = 0;
     141
     142        // if any leftover exceptions handle
     143        for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( &this ) ) {
     144                unhandled_ex++;
     145                free( nl_ex->the_exception );
     146                free( nl_ex );
     147        }
     148
     149        #ifdef __CFA_DEBUG__
     150        if ( unhandled_ex > 0 )
     151                printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex );
     152        #endif
     153
     154        if ( this.state != Halted && this.state != Start && this.state != Primed ) {
    156155                coroutine$ * src = active_coroutine();
    157156                coroutine$ * dst = &this;
     
    174173// Part of the Public API
    175174// Not inline since only ever called once per coroutine
    176 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
    177 void prime(T& cor) libcfa_public {
    178         coroutine$* this = get_coroutine(cor);
    179         assert(this->state == Start);
     175forall( T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); } )
     176void prime( T & cor ) libcfa_public {
     177        coroutine$ * this = get_coroutine(cor);
     178        assert( this->state == Start );
    180179
    181180        this->state = Primed;
    182         resume(cor);
     181        resume( cor );
    183182}
    184183
    185184static [void *, size_t] __stack_alloc( size_t storageSize ) {
    186185        const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    187         assert(__page_size != 0l);
     186        assert( __page_size != 0l );
    188187        size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
    189         size = ceiling(size, __page_size);
     188        size = ceiling( size, __page_size );
    190189
    191190        // If we are running debug, we also need to allocate a guardpage to catch stack overflows.
     
    193192        #if CFA_COROUTINE_USE_MMAP
    194193                storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
    195                 if(storage == ((void*)-1)) {
     194                if (storage == ((void*)-1)) {
    196195                        abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
    197196                }
     
    227226                size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
    228227                storage = (void *)(((intptr_t)storage) - __page_size);
    229                 if(munmap(storage, size + __page_size) == -1) {
     228                if (munmap(storage, size + __page_size) == -1) {
    230229                        abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
    231230                }
     
    248247        void * storage;
    249248        size_t size;
    250         if ( !this->storage ) {
     249        if ( ! this->storage ) {
    251250                userStack = false;
    252251                [storage, size] = __stack_alloc( create_size );
     
    302301                athrd->corctx_flag = false;
    303302
    304                 if(cor->state == Primed) {
     303                if (cor->state == Primed) {
    305304                        __cfactx_suspend();
    306305                }
     
    317316
    318317void defaultResumeAtHandler( exception_t * except ) {
    319     __cfaehm_allocate_exception( except );
    320     __cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler );
     318        __cfaehm_allocate_exception( except );
     319        __cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler );
    321320}
    322321
     
    328327
    329328bool poll( coroutine$ * cor ) libcfa_public {
    330     nonlocal_exception * nl_ex = pop_ehm_head( cor );
    331 
    332     // if no exceptions return false
    333     if ( nl_ex == 0p ) return false;
    334    
    335     // otherwise loop and throwResume all pending exceptions
    336     while ( nl_ex != 0p ){
     329        nonlocal_exception * nl_ex = pop_ehm_head( cor );
     330
     331        // if no exceptions return false
     332        if ( nl_ex == 0p ) return false;
     333       
     334        // otherwise loop and throwResume all pending exceptions
     335        for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( cor ) ) {
    337336                ehm_cleanup ex_holder{ nl_ex->the_exception };
    338         free( nl_ex );
    339         __cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler );
    340        
    341         nl_ex = pop_ehm_head( cor );
    342     }
    343    
    344     return true;
     337                free( nl_ex );
     338                __cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler );
     339        }
     340       
     341        return true;
    345342}
    346343
     
    354351// user facing ehm operations
    355352forall(T & | is_coroutine(T)) {
    356     // enable/disable non-local exceptions
    357     void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; }
    358     void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; }
    359 
    360     // poll for non-local exceptions
    361     bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); }
    362 
    363     // poll iff nonlocal ehm is enabled
    364     bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? poll( cor ) : false; }
    365 
    366     coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; }
    367     coroutine$ * first_resumer( T & cor ) libcfa_public { return get_coroutine( cor )->starter; }
     353        // enable/disable non-local exceptions
     354        void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; }
     355        void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; }
     356
     357        // poll for non-local exceptions
     358        bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); }
     359
     360        // poll iff nonlocal ehm is enabled
     361        bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? poll( cor ) : false; }
     362
     363        coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; }
     364        coroutine$ * first_resumer( T & cor ) libcfa_public { return get_coroutine( cor )->starter; }
    368365}
    369366
     
    371368forall(exceptT *, T & | ehm_resume_at( exceptT, T ))
    372369void resumeAt( T & receiver, exceptT & ex ) libcfa_public {
    373     coroutine$ * cor = get_coroutine( receiver );
    374     nonlocal_exception * nl_ex = alloc();
    375     exceptT * ex_copy = alloc();
    376     memcpy( ex_copy, &ex, sizeof(exceptT) );
    377     (*nl_ex){ (exception_t *)ex_copy };
    378     lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
    379     append( cor->ehm_state.ehm_buffer, nl_ex );
    380     unlock( cor->ehm_state.buffer_lock );
     370        coroutine$ * cor = get_coroutine( receiver );
     371        nonlocal_exception * nl_ex = alloc();
     372        exceptT * ex_copy = alloc();
     373        memcpy( ex_copy, &ex, sizeof(exceptT) );
     374        (*nl_ex){ (exception_t *)ex_copy };
     375        lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
     376        append( cor->ehm_state.ehm_buffer, nl_ex );
     377        unlock( cor->ehm_state.buffer_lock );
    381378}
    382379
    383380forall(exceptT * | { void $throwResume(exceptT &); })
    384381void resumeAt( coroutine$ * receiver, exceptT & ex ) libcfa_public {
    385     nonlocal_exception * nl_ex = alloc();
    386     exceptT * ex_copy = alloc();
    387     memcpy( ex_copy, &ex, sizeof(exceptT) );
    388     (*nl_ex){ (exception_t *)ex_copy };
    389     lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
    390     append( receiver->ehm_state.ehm_buffer, nl_ex );
    391     unlock( receiver->ehm_state.buffer_lock );
     382        nonlocal_exception * nl_ex = alloc();
     383        exceptT * ex_copy = alloc();
     384        memcpy( ex_copy, &ex, sizeof(exceptT) );
     385        (*nl_ex){ (exception_t *)ex_copy };
     386        lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
     387        append( receiver->ehm_state.ehm_buffer, nl_ex );
     388        unlock( receiver->ehm_state.buffer_lock );
    392389}
    393390
  • libcfa/src/concurrency/coroutine.hfa

    rf85de47 r65bd3c2  
    1010// Created On       : Mon Nov 28 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Feb  2 11:31:42 2023
    13 // Update Count     : 13
     12// Last Modified On : Fri Apr 25 06:52:04 2025
     13// Update Count     : 15
    1414//
    1515
     
    2626    nonlocal_exception * next;
    2727};
    28 static inline void ?{} ( nonlocal_exception & this, exception_t * ex ) with(this) {
     28
     29static inline void ?{}( nonlocal_exception & this, exception_t * ex ) with(this) {
    2930    the_exception = ex;
    30     next = 0p;
     31    this.next = 0p;
    3132}
    3233
     
    6667// void ^?{}( coStack_t & this );
    6768
    68 void  ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize );
     69void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize );
    6970void ^?{}( coroutine$ & this );
    7071
  • libcfa/src/concurrency/future.hfa

    rf85de47 r65bd3c2  
    1010// Created On       : Wed Jan 06 17:33:18 2021
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sun Mar  2 14:45:56 2025
    13 // Update Count     : 19
     12// Last Modified On : Wed Apr 23 22:41:10 2025
     13// Update Count     : 22
    1414//
    1515
     
    6363                void reset( future(T) & this ) with(this) {
    6464                        lock( lock );
    65                         if ( ! waiters`isEmpty )
     65                        if ( ! isEmpty( waiters ) )
    6666                                abort("Attempting to reset a future with blocked waiters");
    6767                        state = FUTURE_EMPTY;
     
    8282
    8383                bool fulfil$( future(T) & this ) with(this) {   // helper
    84                         bool ret_val = ! waiters`isEmpty;
     84                        bool ret_val = ! isEmpty( waiters );
    8585                        state = FUTURE_FULFILLED;
    86                         while ( ! waiters`isEmpty ) {
     86                        while ( ! isEmpty( waiters ) ) {
    8787                                if ( !__handle_waituntil_OR( waiters ) ) // handle special waituntil OR case
    8888                                        break; // if handle_OR returns false then waiters is empty so break
    89                                 select_node &s = try_pop_front( waiters );
     89                                select_node &s = remove_first( waiters );
    9090
    9191                                if ( s.clause_status == 0p )                    // poke in result so that woken threads do not need to reacquire any locks
     
    208208
    209209                bool unregister_select( future(T) & this, select_node & s ) with(this) {
    210                         if ( ! s`isListed ) return false;
    211                         lock( lock );
    212                         if ( s`isListed ) remove( s );
     210                        if ( ! isListed( s ) ) return false;
     211                        lock( lock );
     212                        if ( isListed( s ) ) remove( s );
    213213                        unlock( lock );
    214214                        return false;
  • libcfa/src/concurrency/invoke.h

    rf85de47 r65bd3c2  
    1010// Created On       : Tue Jan 17 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Wed Aug 30 21:27:51 2023
    13 // Update Count     : 60
     12// Last Modified On : Wed Apr 23 15:27:18 2025
     13// Update Count     : 61
    1414//
    1515
     
    259259        }
    260260
    261                 static inline thread$ * volatile & ?`next ( thread$ * this ) {
     261                static inline thread$ * volatile & next( thread$ * this ) {
    262262                        return this->user_link.next;
    263263                }
  • libcfa/src/concurrency/io.cfa

    rf85de47 r65bd3c2  
    9595        static inline void __post(oneshot & this, bool kernel, unpark_hint hint) {
    9696                thread$ * t = post( this, false );
    97                 if(kernel) __kernel_unpark( t, hint );
     97                if (kernel) __kernel_unpark( t, hint );
    9898                else unpark( t, hint );
    9999        }
     
    108108                        // do the system call in a loop, repeat on interrupts
    109109                        ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8);
    110                         if( ret < 0 ) {
     110                        if ( ret < 0 ) {
    111111                                switch((int)errno) {
    112112                                case EINTR:
     
    154154                        const __u32 tail = *ctx->cq.tail;
    155155
    156                         if(head == tail) return false;
     156                        if (head == tail) return false;
    157157                }
    158158
    159159                // try a simple spinlock acquire, it's likely there are completions to drain
    160                 if(!__atomic_try_acquire(&ctx->cq.try_lock)) {
     160                if ( ! __atomic_try_acquire(&ctx->cq.try_lock)) {
    161161                        // some other processor already has it
    162162                        __STATS__( false, io.calls.locked++; )
     
    214214
    215215                        // we finished draining the completions... unless the ring buffer was full and there are more secret completions in the kernel.
    216                         if(likely(count < num)) break;
     216                        if (likely(count < num)) break;
    217217
    218218                        // the ring buffer was full, there could be more stuff in the kernel.
     
    243243
    244244                // if submitting must be submitted, do the system call
    245                 if(ctx.sq.to_submit != 0) {
     245                if (ctx.sq.to_submit != 0) {
    246246                        ioring_syscsll(ctx, 0, 0);
    247247                }
     
    278278                        // only help once every other time
    279279                        // pick a target when not helping
    280                         if(proc->io.target == UINT_MAX) {
     280                        if (proc->io.target == UINT_MAX) {
    281281                                uint64_t chaos = __tls_rand();
    282282                                // choose who to help and whether to accept helping far processors
     
    285285
    286286                                // if the processor is on the same cache line or is lucky ( 3 out of 256 odds ) help it
    287                                 if(ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
     287                                if (ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {
    288288                                        proc->io.target = other;
    289289                                }
     
    294294                                /* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX );
    295295                                // make sure the target hasn't stopped existing since last time
    296                                 HELP: if(target < ctxs_count) {
     296                                HELP: if (target < ctxs_count) {
    297297                                        // calculate it's age and how young it could be before we give up on helping
    298298                                        const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false);
     
    300300                                        __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no");
    301301                                        // is the target older than the cutoff, recall 0 is oldest and bigger ints are younger
    302                                         if(age <= cutoff) break HELP;
     302                                        if (age <= cutoff) break HELP;
    303303
    304304                                        // attempt to help the submission side
     
    306306
    307307                                        // attempt to help the completion side
    308                                         if(!try_acquire(io.data[target])) break HELP; // already acquire no help needed
     308                                        if ( ! try_acquire(io.data[target])) break HELP; // already acquire no help needed
    309309
    310310                                        // actually help
    311                                         if(!__cfa_do_drain( io.data[target], cltr )) break HELP;
     311                                        if ( ! __cfa_do_drain( io.data[target], cltr )) break HELP;
    312312
    313313                                        // track we did help someone
     
    322322
    323323                // Drain the local queue
    324                 if(try_acquire( proc->io.ctx )) {
     324                if (try_acquire( proc->io.ctx )) {
    325325                        local = __cfa_do_drain( proc->io.ctx, cltr );
    326326                }
     
    390390
    391391                // If we don't have enough sqes, fail
    392                 if((ftail - fhead) < want) { return false; }
     392                if ((ftail - fhead) < want) { return false; }
    393393
    394394                // copy all the indexes we want from the available list
     
    422422
    423423                // We can proceed to the fast path
    424                 if( __alloc(ctx, idxs, want) ) {
     424                if ( __alloc(ctx, idxs, want) ) {
    425425                        // Allocation was successful
    426426                        __STATS__( true, io.alloc.fast += 1; )
     
    456456        // barebones logic to submit a group of sqes
    457457        static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) {
    458                 if(!lock)
     458                if ( ! lock)
    459459                        lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 );
    460460                // We can proceed to the fast path
     
    478478                __atomic_store_n(&ctx->proc->io.dirty  , true, __ATOMIC_RELAXED);
    479479
    480                 if(!lock)
     480                if ( ! lock)
    481481                        unlock( ctx->ext_sq.lock );
    482482        }
     
    487487                __submit_only(ctx, idxs, have, false);
    488488
    489                 if(sq.to_submit > 30) {
     489                if (sq.to_submit > 30) {
    490490                        __tls_stats()->io.flush.full++;
    491491                        __cfa_io_flush( ctx->proc );
    492492                }
    493                 if(!lazy) {
     493                if ( ! lazy ) {
    494494                        __tls_stats()->io.flush.eager++;
    495495                        __cfa_io_flush( ctx->proc );
     
    503503
    504504                disable_interrupts();
    505                 __STATS__( true, if(!lazy) io.submit.eagr += 1; )
     505                __STATS__( true, if ( ! lazy ) io.submit.eagr += 1; )
    506506                struct processor * proc = __cfaabi_tls.this_processor;
    507507                io_context$ * ctx = proc->io.ctx;
     
    510510
    511511                // Can we proceed to the fast path
    512                 if( ctx == inctx )              // We have the right instance?
     512                if ( ctx == inctx )             // We have the right instance?
    513513                {
    514514                        // yes! fast submit
     
    564564                __u32 count = chead - phead;
    565565
    566                 if(count == 0) {
     566                if (count == 0) {
    567567                        return 0;
    568568                }
     
    594594                lock( queue.lock __cfaabi_dbg_ctx2 );
    595595                {
    596                         was_empty = queue.queue`isEmpty;
     596                        was_empty = isEmpty( queue.queue );
    597597
    598598                        // Add our request to the list
     
    632632        // notify the arbiter that new allocations are available
    633633        static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) {
    634                 /* paranoid */ verify( !this.pending.queue`isEmpty );
     634                /* paranoid */ verify( ! isEmpty( this.pending.queue ) );
    635635                /* paranoid */ verify( __preemption_enabled() );
    636636
     
    642642                        // as long as there are pending allocations try to satisfy them
    643643                        // for simplicity do it in FIFO order
    644                         while( !this.pending.queue`isEmpty ) {
     644                        while( ! isEmpty( this.pending.queue ) ) {
    645645                                // get first pending allocs
    646646                                __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head;
    647                                 __pending_alloc & pa = (__pending_alloc&)(this.pending.queue`first);
     647                                __pending_alloc & pa = (__pending_alloc&)( first( this.pending.queue ));
    648648
    649649                                // check if we have enough to satisfy the request
    650                                 if( have > pa.want ) goto DONE;
     650                                if ( have > pa.want ) goto DONE;
    651651
    652652                                // if there are enough allocations it means we can drop the request
    653                                 try_pop_front( this.pending.queue );
     653                                remove_first( this.pending.queue );
    654654
    655655                                /* paranoid */__attribute__((unused)) bool ret =
     
    676676        // short hand to avoid the mutual exclusion of the pending is empty regardless
    677677        static void __ioarbiter_notify( io_context$ & ctx ) {
    678                 if(empty( ctx.arbiter->pending )) return;
     678                if (empty( ctx.arbiter->pending )) return;
    679679                __ioarbiter_notify( *ctx.arbiter, &ctx );
    680680        }
     
    700700                // if this is the first to be enqueued, signal the processor in an attempt to speed up flushing
    701701                // if it's not the first enqueue, a signal is already in transit
    702                 if( we ) {
     702                if ( we ) {
    703703                        sigval_t value = { PREEMPT_IO };
    704704                        __cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value);
     
    716716        static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) {
    717717                // if there are no external operations just return
    718                 if(empty( ctx.ext_sq )) return;
     718                if ( empty( ctx.ext_sq ) ) return;
    719719
    720720                // stats and logs
     
    727727                        // pop each operation one at a time.
    728728                        // There is no wait morphing because of the io sq ring
    729                         while( !ctx.ext_sq.queue`isEmpty ) {
     729                        while( ! isEmpty( ctx.ext_sq.queue ) ) {
    730730                                // drop the element from the queue
    731                                 __external_io & ei = (__external_io&)try_pop_front( ctx.ext_sq.queue );
     731                                __external_io & ei = (__external_io&)remove_first( ctx.ext_sq.queue );
    732732
    733733                                // submit it
  • libcfa/src/concurrency/kernel.cfa

    rf85de47 r65bd3c2  
    1010// Created On       : Tue Jan 17 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Jan  9 08:42:05 2023
    13 // Update Count     : 77
     12// Last Modified On : Fri Apr 25 07:02:42 2025
     13// Update Count     : 82
    1414//
    1515
     
    4545#pragma GCC diagnostic pop
    4646
    47 #if !defined(__CFA_NO_STATISTICS__)
     47#if ! defined(__CFA_NO_STATISTICS__)
    4848        #define __STATS_DEF( ...) __VA_ARGS__
    4949#else
     
    158158
    159159        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
    160         #if !defined(__CFA_NO_STATISTICS__)
    161                 if( this->print_halts ) {
     160        #if ! defined(__CFA_NO_STATISTICS__)
     161                if ( this->print_halts ) {
    162162                        __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this);
    163163                }
     
    169169
    170170                // if we need to run some special setup, now is the time to do it.
    171                 if(this->init.thrd) {
     171                if (this->init.thrd) {
    172172                        this->init.thrd->curr_cluster = this->cltr;
    173173                        __run_thread(this, this->init.thrd);
     
    185185                        readyThread = __next_thread( this->cltr );
    186186
    187                         if( !readyThread ) {
     187                        if ( ! readyThread ) {
    188188                                // there is no point in holding submissions if we are idle
    189189                                __IO_STATS__(true, io.flush.idle++; )
     
    196196                        }
    197197
    198                         if( !readyThread ) for(5) {
     198                        if ( ! readyThread ) for(5) {
    199199                                readyThread = __next_thread_slow( this->cltr );
    200200
    201                                 if( readyThread ) break;
     201                                if ( readyThread ) break;
    202202
    203203                                // It's unlikely we still I/O to submit, but the arbiter could
     
    210210
    211211                        HALT:
    212                         if( !readyThread ) {
     212                        if ( ! readyThread ) {
    213213                                // Don't block if we are done
    214                                 if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
     214                                if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    215215
    216216                                // Push self to idle stack
    217                                 if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
     217                                if ( ! mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
    218218
    219219                                // Confirm the ready-queue is empty
    220220                                readyThread = __next_thread_search( this->cltr );
    221                                 if( readyThread ) {
     221                                if ( readyThread ) {
    222222                                        // A thread was found, cancel the halt
    223223                                        mark_awake(this->cltr->procs, * this);
     
    247247
    248248                        // Are we done?
    249                         if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
    250 
    251                         if(__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
     249                        if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
     250
     251                        if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && ! __atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {
    252252                                __IO_STATS__(true, io.flush.dirty++; )
    253253                                __cfa_io_flush( this );
     
    263263        post( this->terminated );
    264264
    265         if(this == mainProcessor) {
     265        if (this == mainProcessor) {
    266266                // HACK : the coroutine context switch expects this_thread to be set
    267267                // and it make sense for it to be set in all other cases except here
     
    294294
    295295        // Actually run the thread
    296         RUNNING:  while(true) {
     296        RUNNING:
     297        while( true ) {
    297298                thrd_dst->preempted = __NO_PREEMPTION;
    298299
     
    339340                // In case 2, we lost the race so we now own the thread.
    340341
    341                 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
     342                if (unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
    342343                        // Reset the this_thread now that we know
    343344                        // the state isn't active anymore
     
    349350                }
    350351
    351                 if(unlikely(thrd_dst->state == Halting)) {
     352                if (unlikely(thrd_dst->state == Halting)) {
    352353                        // Reset the this_thread now that we know
    353354                        // the state isn't active anymore
     
    418419        }
    419420
    420         #if !defined(__CFA_NO_STATISTICS__)
     421        #if ! defined(__CFA_NO_STATISTICS__)
    421422                /* paranoid */ verify( thrd_src->last_proc != 0p );
    422                 if(thrd_src->last_proc != kernelTLS().this_processor) {
     423                if (thrd_src->last_proc != kernelTLS().this_processor) {
    423424                        __tls_stats()->ready.threads.migration++;
    424425                }
     
    440441        /* paranoid */ verify( thrd->curr_cluster );
    441442        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
    442         /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
     443        /* paranoid */  if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
    443444                                        "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
    444         /* paranoid */  if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
     445        /* paranoid */  if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,
    445446                                        "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    446447        /* paranoid */ #endif
     
    463464        __wake_one( cl );
    464465
    465         #if !defined(__CFA_NO_STATISTICS__)
    466                 if( kernelTLS().this_stats ) {
     466        #if ! defined(__CFA_NO_STATISTICS__)
     467                if ( kernelTLS().this_stats ) {
    467468                        __tls_stats()->ready.threads.threads++;
    468                         if(outside) {
     469                        if (outside) {
    469470                                __tls_stats()->ready.threads.extunpark++;
    470471                        }
     
    542543        /* paranoid */ verify( ready_schedule_islocked());
    543544
    544         if( !thrd ) return;
    545 
    546         if(__must_unpark(thrd)) {
     545        if ( ! thrd ) return;
     546
     547        if (__must_unpark(thrd)) {
    547548                // Wake lost the race,
    548549                __schedule_thread( thrd, hint );
     
    554555
    555556void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
    556         if( !thrd ) return;
    557 
    558         if(__must_unpark(thrd)) {
     557        if ( ! thrd ) return;
     558
     559        if (__must_unpark(thrd)) {
    559560                disable_interrupts();
    560561                        // Wake lost the race,
     
    592593                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
    593594
    594                 if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
    595                 if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
    596                 if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
     595                if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }
     596                if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }
     597                if ( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }
    597598
    598599                thrd->state = Halting;
     
    618619                // If that is the case, abandon the preemption.
    619620                bool preempted = false;
    620                 if(thrd->rdy_link.next == 0p) {
     621                if (thrd->rdy_link.next == 0p) {
    621622                        preempted = true;
    622623                        thrd->preempted = reason;
     
    641642
    642643        // If no one is sleeping: we are done
    643         if( fdp == 0p ) return;
     644        if ( fdp == 0p ) return;
    644645
    645646        int fd = 1;
    646         if( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
     647        if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {
    647648                fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED);
    648649        }
     
    652653        case 0:
    653654                // If the processor isn't ready to sleep then the exchange will already wake it up
    654                 #if !defined(__CFA_NO_STATISTICS__)
    655                         if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
     655                #if ! defined(__CFA_NO_STATISTICS__)
     656                        if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;
    656657                        } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); }
    657658                #endif
     
    659660        case 1:
    660661                // If someone else already said they will wake them: we are done
    661                 #if !defined(__CFA_NO_STATISTICS__)
    662                         if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
     662                #if ! defined(__CFA_NO_STATISTICS__)
     663                        if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;
    663664                        } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); }
    664665                #endif
     
    670671                /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret );
    671672
    672                 #if !defined(__CFA_NO_STATISTICS__)
    673                         if( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
     673                #if ! defined(__CFA_NO_STATISTICS__)
     674                        if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;
    674675                        } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); }
    675676                #endif
     
    710711
    711712                // Someone already told us to wake-up! No time for a nap.
    712                 if(expected == 1) { return; }
     713                if (expected == 1) { return; }
    713714
    714715                // Try to mark that we are going to sleep
    715                 if(__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
     716                if (__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false,  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {
    716717                        // Every one agreed, taking a nap
    717718                        break;
     
    720721
    721722
    722         #if !defined(__CFA_NO_STATISTICS__)
    723                 if(this->print_halts) {
     723        #if ! defined(__CFA_NO_STATISTICS__)
     724                if (this->print_halts) {
    724725                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
    725726                }
     
    731732                eventfd_t val;
    732733                ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) );
    733                 if(ret < 0) {
     734                if (ret < 0) {
    734735                        switch((int)errno) {
    735736                        case EAGAIN:
     
    746747        }
    747748
    748         #if !defined(__CFA_NO_STATISTICS__)
    749                 if(this->print_halts) {
     749        #if ! defined(__CFA_NO_STATISTICS__)
     750                if (this->print_halts) {
    750751                        __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
    751752                }
     
    759760
    760761        /* paranoid */ verify( ! __preemption_enabled() );
    761         if(!try_lock( this )) return false;
     762        if ( ! try_lock( this )) return false;
    762763                this.idle++;
    763764                /* paranoid */ verify( this.idle <= this.total );
     
    784785                        // update the pointer to the head wait context
    785786                        struct __fd_waitctx * wctx = 0;
    786                         if(!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;
     787                        if ( ! isEmpty( this.idles )) wctx = &first( this. idles ).idle_wctx;
    787788                        __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST);
    788789                }
     
    798799        thread$ * thrd = __cfaabi_tls.this_thread;
    799800
    800         if(thrd) {
     801        if (thrd) {
    801802                int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
    802803                __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
     
    847848//-----------------------------------------------------------------------------
    848849// Statistics
    849 #if !defined(__CFA_NO_STATISTICS__)
     850#if ! defined(__CFA_NO_STATISTICS__)
    850851        void print_halts( processor & this ) libcfa_public {
    851852                this.print_halts = true;
     
    855856                /* paranoid */ verify( cltr->stats );
    856857
    857                 processor * it = &list`first;
     858                processor * it = &first( list );
    858859                for(unsigned i = 0; i < count; i++) {
    859860                        /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
     
    861862                        // __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
    862863                        __tally_stats( cltr->stats, it->local_data->this_stats );
    863                         it = &(*it)`next;
     864                        it = &next( *it );
    864865                }
    865866        }
  • libcfa/src/concurrency/kernel/cluster.cfa

    rf85de47 r65bd3c2  
    234234
    235235static void assign_list(unsigned & valrq, unsigned & valio, dlist(struct processor) & list, unsigned count) {
    236         struct processor * it = &list`first;
     236        struct processor * it = &first( list );
    237237        for(unsigned i = 0; i < count; i++) {
    238238                /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
     
    245245                        valio += __shard_factor.io;
    246246                #endif
    247                 it = &(*it)`next;
     247                it = &next( *it );
    248248        }
    249249}
     
    258258#if defined(CFA_HAVE_LINUX_IO_URING_H)
    259259        static void assign_io(io_context$ ** data, size_t count, dlist(struct processor) & list) {
    260                 struct processor * it = &list`first;
     260                struct processor * it = &first( list );
    261261                while(it) {
    262262                        /* paranoid */ verifyf( it, "Unexpected null iterator\n");
    263263                        /* paranoid */ verifyf( it->io.ctx->cq.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count);
    264264                        data[it->io.ctx->cq.id] = it->io.ctx;
    265                         it = &(*it)`next;
     265                        it = &next( *it );
    266266                }
    267267        }
  • libcfa/src/concurrency/kernel/private.hfa

    rf85de47 r65bd3c2  
    1010// Created On       : Mon Feb 13 12:27:26 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Thu Mar  2 16:04:46 2023
    13 // Update Count     : 11
     12// Last Modified On : Mon Apr 21 18:08:48 2025
     13// Update Count     : 12
    1414//
    1515
     
    287287static inline [unsigned, uint_fast32_t] ready_mutate_register() {
    288288        unsigned id = register_proc_id();
    289         uint_fast32_t last = ready_mutate_lock();
    290         return [id, last];
     289        return [id, ready_mutate_lock()];
    291290}
    292291
  • libcfa/src/concurrency/kernel/startup.cfa

    rf85de47 r65bd3c2  
    6969//-----------------------------------------------------------------------------
    7070// Start and stop routine for the kernel, declared first to make sure they run first
    71 static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    72 static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
     71static void __kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     72static void __kernel_shutdown(void) __attribute__(( destructor( STARTUP_PRIORITY_KERNEL ) ));
    7373
    7474//-----------------------------------------------------------------------------
     
    7878static void * __invoke_processor(void * arg);
    7979static void __kernel_first_resume( processor * this );
    80 static void __kernel_last_resume ( processor * this );
     80static void __kernel_last_resume( processor * this );
    8181static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT);
    8282static void deinit(processor & this);
     
    9999extern void __kernel_alarm_shutdown(void);
    100100extern void __cfa_io_start( processor * );
    101 extern void __cfa_io_stop ( processor * );
     101extern void __cfa_io_stop( processor * );
    102102
    103103//-----------------------------------------------------------------------------
     
    110110//-----------------------------------------------------------------------------
    111111// Kernel storage
    112 KERNEL_STORAGE(cluster,              mainCluster);
    113 KERNEL_STORAGE(processor,            mainProcessor);
    114 KERNEL_STORAGE(thread$,              mainThread);
    115 KERNEL_STORAGE(__stack_t,            mainThreadCtx);
     112KERNEL_STORAGE(cluster, mainCluster);
     113KERNEL_STORAGE(processor, mainProcessor);
     114KERNEL_STORAGE(thread$, mainThread);
     115KERNEL_STORAGE(__stack_t, mainThreadCtx);
    116116#if !defined(__CFA_NO_STATISTICS__)
    117117KERNEL_STORAGE(__stats_t, mainProcStats);
    118118#endif
    119119
    120 cluster              * mainCluster libcfa_public;
    121 processor            * mainProcessor;
    122 thread$              * mainThread;
     120cluster * mainCluster libcfa_public;
     121processor * mainProcessor;
     122thread$ * mainThread;
    123123
    124124extern "C" {
     
    150150// Struct to steal stack
    151151struct current_stack_info_t {
    152         __stack_t * storage;  // pointer to stack object
    153         void * base;          // base of stack
    154         void * limit;         // stack grows towards stack limit
    155         void * context;       // address of cfa_context_t
     152        __stack_t * storage;                                                            // pointer to stack object
     153        void * base;                                                                            // base of stack
     154        void * limit;                                                                           // stack grows towards stack limit
     155        void * context;                                                                         // address of cfa_context_t
    156156};
    157157
     
    234234        //initialize the global state variables
    235235        __cfaabi_tls.this_processor = mainProcessor;
    236         __cfaabi_tls.this_thread    = mainThread;
     236        __cfaabi_tls.this_thread = mainThread;
    237237
    238238        #if !defined( __CFA_NO_STATISTICS__ )
     
    355355        processor * proc = (processor *) arg;
    356356        __cfaabi_tls.this_processor = proc;
    357         __cfaabi_tls.this_thread    = 0p;
     357        __cfaabi_tls.this_thread = 0p;
    358358        __cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1];
    359359        proc->local_data = &__cfaabi_tls;
     
    477477        stack.storage = info->storage;
    478478        with(*stack.storage) {
    479                 limit     = info->limit;
    480                 base      = info->base;
     479                limit = info->limit;
     480                base = info->base;
    481481        }
    482482        __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage;
     
    485485        state = Start;
    486486        starter = 0p;
    487         last = 0p;
     487        this.last = 0p;
    488488        cancellation = 0p;
    489     ehm_state.ehm_buffer{};
    490     ehm_state.buffer_lock{};
    491     ehm_state.ehm_enabled = false;
     489        ehm_state.ehm_buffer{};
     490        ehm_state.buffer_lock{};
     491        ehm_state.ehm_enabled = false;
    492492}
    493493
     
    502502        self_mon_p = &self_mon;
    503503        rdy_link.next = 0p;
    504         rdy_link.ts   = MAX;
     504        rdy_link.ts = MAX;
    505505        user_link.next = 0p;
    506506        user_link.prev = 0p;
     
    509509        preferred = ready_queue_new_preferred();
    510510        last_proc = 0p;
    511         PRNG_SET_SEED( random_state,  __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
     511        PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() );
    512512        #if defined( __CFA_WITH_VERIFY__ )
    513513                executing = 0p;
     
    531531        this.name = name;
    532532        this.cltr = &_cltr;
    533     __atomic_add_fetch( &_cltr.procs.constructed, 1u, __ATOMIC_RELAXED );
     533        __atomic_add_fetch( &_cltr.procs.constructed, 1u, __ATOMIC_RELAXED );
    534534        this.rdq.its = 0;
    535535        this.rdq.itr = 0;
    536         this.rdq.id  = 0;
     536        this.rdq.id = 0;
    537537        this.rdq.target = MAX;
    538538        this.rdq.last = MAX;
     
    545545        this.io.ctx = 0p;
    546546        this.io.pending = false;
    547         this.io.dirty   = false;
     547        this.io.dirty = false;
    548548
    549549        this.init.thrd = initT;
     
    599599        __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
    600600
    601     __atomic_sub_fetch( &this.cltr->procs.constructed, 1u, __ATOMIC_RELAXED );
     601        __atomic_sub_fetch( &this.cltr->procs.constructed, 1u, __ATOMIC_RELAXED );
    602602
    603603        __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
     
    619619// Cluster
    620620static void ?{}(__cluster_proc_list & this) {
    621         this.fdw   = 0p;
    622         this.idle  = 0;
    623     this.constructed = 0;
     621        this.fdw = 0p;
     622        this.idle = 0;
     623        this.constructed = 0;
    624624        this.total = 0;
    625625}
     
    706706//-----------------------------------------------------------------------------
    707707// Global Queues
    708 static void doregister( cluster     & cltr ) {
    709         lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
     708static void doregister( cluster & cltr ) {
     709        lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
    710710        push_front( __cfa_dbg_global_clusters.list, cltr );
    711         unlock    ( __cfa_dbg_global_clusters.lock );
    712 }
    713 
    714 static void unregister( cluster     & cltr ) {
    715         lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
     711        unlock( __cfa_dbg_global_clusters.lock );
     712}
     713
     714static void unregister( cluster & cltr ) {
     715        lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
    716716        remove( __cfa_dbg_global_clusters.list, cltr );
    717717        unlock( __cfa_dbg_global_clusters.lock );
     
    719719
    720720void doregister( cluster * cltr, thread$ & thrd ) {
    721         lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
     721        lock(cltr->thread_list_lock __cfaabi_dbg_ctx2);
    722722        cltr->nthreads += 1;
    723723        insert_first(cltr->threads, thrd);
    724         unlock    (cltr->thread_list_lock);
     724        unlock(cltr->thread_list_lock);
    725725}
    726726
    727727void unregister( cluster * cltr, thread$ & thrd ) {
    728         lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
     728        lock(cltr->thread_list_lock __cfaabi_dbg_ctx2);
    729729        {
    730730                tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) = void;
  • libcfa/src/concurrency/locks.cfa

    rf85de47 r65bd3c2  
    7979        // lock is held by some other thread
    8080        if ( owner != 0p && owner != thrd ) {
    81         select_node node;
     81                select_node node;
    8282                insert_last( blocked_threads, node );
    8383                wait_count++;
    8484                unlock( lock );
    8585                park( );
    86         return;
     86                return;
    8787        } else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread
    8888                recursion_count++;
     
    9191                recursion_count = 1;
    9292        }
    93     unlock( lock );
     93        unlock( lock );
    9494}
    9595
     
    115115
    116116static inline void pop_node( blocking_lock & this ) with( this ) {
    117     __handle_waituntil_OR( blocked_threads );
    118     select_node * node = &try_pop_front( blocked_threads );
    119     if ( node ) {
    120         wait_count--;
    121         owner = node->blocked_thread;
    122         recursion_count = 1;
    123         // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
    124         wake_one( blocked_threads, *node );
    125     } else {
    126         owner = 0p;
    127         recursion_count = 0;
    128     }
     117        __handle_waituntil_OR( blocked_threads );
     118        select_node * node = &remove_first( blocked_threads );
     119        if ( node ) {
     120                wait_count--;
     121                owner = node->blocked_thread;
     122                recursion_count = 1;
     123                // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
     124                wake_one( blocked_threads, *node );
     125        } else {
     126                owner = 0p;
     127                recursion_count = 0;
     128        }
    129129}
    130130
     
    160160                unpark( t );
    161161        }
    162     unlock( lock );
     162        unlock( lock );
    163163}
    164164
     
    172172        pop_node( this );
    173173
    174     select_node node;
    175     active_thread()->link_node = (void *)&node;
    176         unlock( lock );
    177 
    178     pre_park_then_park( pp_fn, pp_datum );
     174        select_node node;
     175        active_thread()->link_node = (void *)&node;
     176        unlock( lock );
     177
     178        pre_park_then_park( pp_fn, pp_datum );
    179179
    180180        return ret;
     
    187187// waituntil() support
    188188bool register_select( blocking_lock & this, select_node & node ) with(this) {
    189     lock( lock __cfaabi_dbg_ctx2 );
     189        lock( lock __cfaabi_dbg_ctx2 );
    190190        thread$ * thrd = active_thread();
    191191
     
    193193        /* paranoid */ verifyf( owner != thrd || multi_acquisition, "Single acquisition lock holder (%p) attempted to reacquire the lock %p resulting in a deadlock.", owner, &this );
    194194
    195     if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) { // OR special case
    196         if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
    197            unlock( lock );
    198            return false;
    199         }
    200     }
     195        if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) { // OR special case
     196                if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     197                        unlock( lock );
     198                        return false;
     199                }
     200        }
    201201
    202202        // lock is held by some other thread
     
    205205                wait_count++;
    206206                unlock( lock );
    207         return false;
     207                return false;
    208208        } else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread
    209209                recursion_count++;
     
    213213        }
    214214
    215     if ( node.park_counter ) __make_select_node_available( node );
    216     unlock( lock );
    217     return true;
     215        if ( node.park_counter ) __make_select_node_available( node );
     216        unlock( lock );
     217        return true;
    218218}
    219219
    220220bool unregister_select( blocking_lock & this, select_node & node ) with(this) {
    221     lock( lock __cfaabi_dbg_ctx2 );
    222     if ( node`isListed ) {
    223         remove( node );
    224         wait_count--;
    225         unlock( lock );
    226         return false;
    227     }
    228    
    229     if ( owner == active_thread() ) {
    230         /* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count );
    231         // if recursion count is zero release lock and set new owner if one is waiting
    232         recursion_count--;
    233         if ( recursion_count == 0 ) {
    234             pop_node( this );
    235         }
    236     }
    237         unlock( lock );
    238     return false;
     221        lock( lock __cfaabi_dbg_ctx2 );
     222        if ( isListed( node ) ) {
     223                remove( node );
     224                wait_count--;
     225                unlock( lock );
     226                return false;
     227        }
     228       
     229        if ( owner == active_thread() ) {
     230                /* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count );
     231                // if recursion count is zero release lock and set new owner if one is waiting
     232                recursion_count--;
     233                if ( recursion_count == 0 ) {
     234                        pop_node( this );
     235                }
     236        }
     237        unlock( lock );
     238        return false;
    239239}
    240240
     
    265265                //      may still be called after a thread has been removed from the queue but
    266266                //      before the alarm is unregistered
    267                 if ( (*info_thd)`isListed ) {   // is thread on queue
     267                if ( isListed( *info_thd ) ) {                                  // is thread on queue
    268268                        info_thd->signalled = false;
    269269                        // remove this thread O(1)
    270270                        remove( *info_thd );
    271271                        cond->count--;
    272                         if( info_thd->lock ) {
     272                        if ( info_thd->lock ) {
    273273                                // call lock's on_notify if a lock was passed
    274274                                on_notify(*info_thd->lock, info_thd->t);
     
    304304                //      may still be called after a thread has been removed from the queue but
    305305                //      before the alarm is unregistered
    306                 if ( (*info_thd)`isListed ) {   // is thread on queue
     306                if ( isListed( *info_thd ) ) {                                  // is thread on queue
    307307                        info_thd->signalled = false;
    308308                        // remove this thread O(1)
     
    332332
    333333        static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
    334                 if(&popped != 0p) {
     334                if (&popped != 0p) {
    335335                        popped.signalled = true;
    336336                        count--;
     
    347347        bool notify_one( condition_variable(L) & this ) with( this ) {
    348348                lock( lock __cfaabi_dbg_ctx2 );
    349                 bool ret = ! blocked_threads`isEmpty;
    350                 process_popped(this, try_pop_front( blocked_threads ));
     349                bool ret = ! isEmpty( blocked_threads );
     350                process_popped(this, remove_first( blocked_threads ));
    351351                unlock( lock );
    352352                return ret;
     
    355355        bool notify_all( condition_variable(L) & this ) with(this) {
    356356                lock( lock __cfaabi_dbg_ctx2 );
    357                 bool ret = ! blocked_threads`isEmpty;
    358                 while( ! blocked_threads`isEmpty ) {
    359                         process_popped(this, try_pop_front( blocked_threads ));
     357                bool ret = ! isEmpty( blocked_threads );
     358                while( ! isEmpty( blocked_threads ) ) {
     359                        process_popped(this, remove_first( blocked_threads ));
    360360                }
    361361                unlock( lock );
     
    364364
    365365        uintptr_t front( condition_variable(L) & this ) with(this) {
    366                 return blocked_threads`isEmpty ? NULL : blocked_threads`first.info;
     366                return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info;
    367367        }
    368368
    369369        bool empty( condition_variable(L) & this ) with(this) {
    370370                lock( lock __cfaabi_dbg_ctx2 );
    371                 bool ret = blocked_threads`isEmpty;
     371                bool ret = isEmpty( blocked_threads );
    372372                unlock( lock );
    373373                return ret;
     
    382382        }
    383383
    384     static size_t block_and_get_recursion( info_thread(L) & i, __cfa_pre_park pp_fn, void * pp_datum ) {
    385         size_t recursion_count = 0;
     384        static size_t block_and_get_recursion( info_thread(L) & i, __cfa_pre_park pp_fn, void * pp_datum ) {
     385                size_t recursion_count = 0;
    386386                if ( i.lock ) // if lock was passed get recursion count to reset to after waking thread
    387387                        recursion_count = on_wait( *i.lock, pp_fn, pp_datum ); // this call blocks
    388388                else
    389             pre_park_then_park( pp_fn, pp_datum );
    390         return recursion_count;
    391     }
    392     static size_t block_and_get_recursion( info_thread(L) & i ) { return block_and_get_recursion( i, pre_park_noop, 0p ); }
     389                        pre_park_then_park( pp_fn, pp_datum );
     390                return recursion_count;
     391        }
     392        static size_t block_and_get_recursion( info_thread(L) & i ) { return block_and_get_recursion( i, pre_park_noop, 0p ); }
    393393
    394394        // helper for wait()'s' with no timeout
    395395        static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
    396396                lock( lock __cfaabi_dbg_ctx2 );
    397         enqueue_thread( this, &i );
     397                enqueue_thread( this, &i );
    398398                unlock( lock );
    399399
    400400                // blocks here
    401         size_t recursion_count = block_and_get_recursion( i );
     401                size_t recursion_count = block_and_get_recursion( i );
    402402
    403403                // resets recursion count here after waking
     
    409409                queue_info_thread( this, i );
    410410
    411     static void cond_alarm_register( void * node_ptr ) { register_self( (alarm_node_t *)node_ptr ); }
     411        static void cond_alarm_register( void * node_ptr ) { register_self( (alarm_node_t *)node_ptr ); }
    412412
    413413        // helper for wait()'s' with a timeout
    414414        static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    415415                lock( lock __cfaabi_dbg_ctx2 );
    416         enqueue_thread( this, &info );
     416                enqueue_thread( this, &info );
    417417                alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
    418418                unlock( lock );
    419419
    420420                // blocks here and registers alarm node before blocking after releasing locks to avoid deadlock
    421         size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
     421                size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
    422422                // park();
    423423
     
    434434                return i.signalled;
    435435
    436         void wait( condition_variable(L) & this                        ) with(this) { WAIT( 0, 0p    ) }
    437         void wait( condition_variable(L) & this, uintptr_t info        ) with(this) { WAIT( info, 0p ) }
    438         void wait( condition_variable(L) & this, L & l                 ) with(this) { WAIT( 0, &l    ) }
     436        void wait( condition_variable(L) & this ) with(this) { WAIT( 0, 0p ) }
     437        void wait( condition_variable(L) & this, uintptr_t info ) with(this) { WAIT( info, 0p ) }
     438        void wait( condition_variable(L) & this, L & l  ) with(this) { WAIT( 0, &l ) }
    439439        void wait( condition_variable(L) & this, L & l, uintptr_t info ) with(this) { WAIT( info, &l ) }
    440440
    441         bool wait( condition_variable(L) & this, Duration duration                        ) with(this) { WAIT_TIME( 0  , 0p , duration ) }
    442         bool wait( condition_variable(L) & this, uintptr_t info, Duration duration        ) with(this) { WAIT_TIME( info, 0p , duration ) }
    443         bool wait( condition_variable(L) & this, L & l, Duration duration                 ) with(this) { WAIT_TIME( 0  , &l , duration ) }
     441        bool wait( condition_variable(L) & this, Duration duration ) with(this) { WAIT_TIME( 0 , 0p , duration ) }
     442        bool wait( condition_variable(L) & this, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, 0p , duration ) }
     443        bool wait( condition_variable(L) & this, L & l, Duration duration  ) with(this) { WAIT_TIME( 0 , &l , duration ) }
    444444        bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) }
    445445
    446446        //-----------------------------------------------------------------------------
    447447        // fast_cond_var
    448         void  ?{}( fast_cond_var(L) & this ){
     448        void ?{}( fast_cond_var(L) & this ){
    449449                this.blocked_threads{};
    450450                #ifdef __CFA_DEBUG__
     
    455455
    456456        bool notify_one( fast_cond_var(L) & this ) with(this) {
    457                 bool ret = ! blocked_threads`isEmpty;
     457                bool ret = ! isEmpty( blocked_threads );
    458458                if ( ret ) {
    459                         info_thread(L) & popped = try_pop_front( blocked_threads );
     459                        info_thread(L) & popped = remove_first( blocked_threads );
    460460                        on_notify(*popped.lock, popped.t);
    461461                }
     
    463463        }
    464464        bool notify_all( fast_cond_var(L) & this ) with(this) {
    465                 bool ret = ! blocked_threads`isEmpty;
    466                 while( ! blocked_threads`isEmpty ) {
    467                         info_thread(L) & popped = try_pop_front( blocked_threads );
     465                bool ret = ! isEmpty( blocked_threads );
     466                while( ! isEmpty( blocked_threads ) ) {
     467                        info_thread(L) & popped = remove_first( blocked_threads );
    468468                        on_notify(*popped.lock, popped.t);
    469469                }
     
    471471        }
    472472
    473         uintptr_t front( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
    474         bool empty ( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
     473        uintptr_t front( fast_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info; }
     474        bool empty ( fast_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ); }
    475475
    476476        void wait( fast_cond_var(L) & this, L & l ) {
     
    494494        // pthread_cond_var
    495495
    496         void  ?{}( pthread_cond_var(L) & this ) with(this) {
     496        void ?{}( pthread_cond_var(L) & this ) with(this) {
    497497                blocked_threads{};
    498498                lock{};
     
    503503        bool notify_one( pthread_cond_var(L) & this ) with(this) {
    504504                lock( lock __cfaabi_dbg_ctx2 );
    505                 bool ret = ! blocked_threads`isEmpty;
     505                bool ret = ! isEmpty( blocked_threads );
    506506                if ( ret ) {
    507                         info_thread(L) & popped = try_pop_front( blocked_threads );
     507                        info_thread(L) & popped = remove_first( blocked_threads );
    508508                        popped.signalled = true;
    509509                        on_notify(*popped.lock, popped.t);
     
    515515        bool notify_all( pthread_cond_var(L) & this ) with(this) {
    516516                lock( lock __cfaabi_dbg_ctx2 );
    517                 bool ret = ! blocked_threads`isEmpty;
    518                 while( ! blocked_threads`isEmpty ) {
    519                         info_thread(L) & popped = try_pop_front( blocked_threads );
     517                bool ret = ! isEmpty( blocked_threads );
     518                while( ! isEmpty( blocked_threads ) ) {
     519                        info_thread(L) & popped = remove_first( blocked_threads );
    520520                        popped.signalled = true;
    521521                        on_notify(*popped.lock, popped.t);
     
    525525        }
    526526
    527         uintptr_t front( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
    528         bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
     527        uintptr_t front( pthread_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info; }
     528        bool empty ( pthread_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ); }
    529529
    530530        static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    531531                lock( lock __cfaabi_dbg_ctx2 );
    532         insert_last( blocked_threads, info );
     532                insert_last( blocked_threads, info );
    533533                pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
    534534                unlock( lock );
    535535
    536536                // blocks here and registers alarm node before blocking after releasing locks to avoid deadlock
    537         size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
     537                size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) );
    538538
    539539                // unregisters alarm so it doesn't go off if signal happens first
     
    551551                lock( lock __cfaabi_dbg_ctx2 );
    552552                info_thread( L ) i = { active_thread(), info, &l };
    553         insert_last( blocked_threads, i );
    554                 unlock( lock );
    555 
    556         // blocks here
     553                insert_last( blocked_threads, i );
     554                unlock( lock );
     555
     556                // blocks here
    557557                size_t recursion_count = block_and_get_recursion( i );
    558558
     
    579579        }
    580580       
    581         bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t  ) {
     581        bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t ) {
    582582                PTHREAD_WAIT_TIME( info, &l , getDuration( t ) )
    583583        }
     
    585585//-----------------------------------------------------------------------------
    586586// Semaphore
    587 void  ?{}( semaphore & this, int count = 1 ) {
     587void ?{}( semaphore & this, int count = 1 ) {
    588588        (this.lock){};
    589589        this.count = count;
     
    603603                park();
    604604                return true;
    605         }
    606         else {
    607             unlock( lock );
    608             return false;
     605        } else {
     606                unlock( lock );
     607                return false;
    609608        }
    610609}
     
    622621
    623622        // make new owner
    624         if( doUnpark ) unpark( thrd );
     623        if ( doUnpark ) unpark( thrd );
    625624
    626625        return thrd;
  • libcfa/src/concurrency/locks.hfa

    rf85de47 r65bd3c2  
    1111// Created On       : Thu Jan 21 19:46:50 2021
    1212// Last Modified By : Peter A. Buhr
    13 // Last Modified On : Tue Dec 24 09:36:52 2024
    14 // Update Count     : 16
     13// Last Modified On : Fri Apr 25 07:14:16 2025
     14// Update Count     : 22
    1515//
    1616
     
    5656
    5757static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) {
    58     pp_fn( pp_datum );
    59     park();
     58        pp_fn( pp_datum );
     59        park();
    6060}
    6161
     
    6363
    6464#define DEFAULT_ON_NOTIFY( lock_type ) \
    65     static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
     65        static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); }
    6666
    6767#define DEFAULT_ON_WAIT( lock_type ) \
    68     static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
    69         unlock( this ); \
    70         pre_park_then_park( pp_fn, pp_datum ); \
    71         return 0; \
    72     }
     68        static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \
     69                unlock( this ); \
     70                pre_park_then_park( pp_fn, pp_datum ); \
     71                return 0; \
     72        }
    7373
    7474// on_wakeup impl if lock should be reacquired after waking up
    7575#define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \
    76     static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
     76        static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); }
    7777
    7878// on_wakeup impl if lock will not be reacquired after waking up
    7979#define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \
    80     static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
     80        static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {}
    8181
    8282
     
    142142static inline void ?{}( mcs_node & this ) { this.next = 0p; }
    143143
    144 static inline mcs_node * volatile & ?`next ( mcs_node * node ) {
     144static inline mcs_node * volatile & next( mcs_node * node ) {
    145145        return node->next;
    146146}
     
    156156
    157157static inline void unlock( mcs_lock & l, mcs_node & n ) {
    158         mcs_node * next = advance( l.queue, &n );
    159         if ( next ) post( next->sem );
     158        mcs_node * nxt = advance( l.queue, &n );
     159        if ( nxt ) post( nxt->sem );
    160160}
    161161
     
    181181
    182182static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) {
    183     n.locked = true;
     183        n.locked = true;
    184184
    185185        #if defined( __ARM_ARCH )
     
    187187        #endif
    188188
    189         mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
    190         if ( prev == 0p ) return;
    191         prev->next = &n;
     189        mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );
     190        if ( prev_val == 0p ) return;
     191        prev_val->next = &n;
    192192       
    193193        #if defined( __ARM_ARCH )
     
    234234// to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params)
    235235static inline int futex( int *uaddr, int futex_op, int val ) {
    236     return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
     236        return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
    237237}
    238238
     
    271271static inline void unlock( futex_mutex & this ) with( this ) {
    272272        // if uncontended do atomic unlock and then return
    273     if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
     273        if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
    274274       
    275275        // otherwise threads are blocked so we must wake one
     
    311311        int state, init_state;
    312312
    313     // speculative grab
    314     state = internal_exchange( this, 1 );
    315     if ( ! state ) return;                                                              // state == 0
    316     init_state = state;
    317     for () {
    318         for ( 4 ) {
    319             while ( ! val ) {                                                   // lock unlocked
    320                 state = 0;
    321                 if ( internal_try_lock( this, state, init_state ) ) return;
    322             }
    323             for ( 30 ) Pause();
    324         }
    325 
    326         while ( ! val ) {                                                               // lock unlocked
    327             state = 0;
    328             if ( internal_try_lock( this, state, init_state ) ) return;
    329         }
    330         sched_yield();
    331        
    332         // if not in contended state, set to be in contended state
    333         state = internal_exchange( this, 2 );
    334         if ( ! state ) return;                                                  // state == 0
    335         init_state = 2;
    336         futex( (int*)&val, FUTEX_WAIT, 2 );                             // if val is not 2 this returns with EWOULDBLOCK
    337     }
     313        // speculative grab
     314        state = internal_exchange( this, 1 );
     315        if ( ! state ) return;                                                          // state == 0
     316        init_state = state;
     317        for () {
     318                for ( 4 ) {
     319                        while ( ! val ) {                                                       // lock unlocked
     320                                state = 0;
     321                                if ( internal_try_lock( this, state, init_state ) ) return;
     322                        }
     323                        for ( 30 ) Pause();
     324                }
     325
     326                while ( ! val ) {                                                               // lock unlocked
     327                        state = 0;
     328                        if ( internal_try_lock( this, state, init_state ) ) return;
     329                }
     330                sched_yield();
     331               
     332                // if not in contended state, set to be in contended state
     333                state = internal_exchange( this, 2 );
     334                if ( ! state ) return;                                                  // state == 0
     335                init_state = 2;
     336                futex( (int*)&val, FUTEX_WAIT, 2 );                             // if val is not 2 this returns with EWOULDBLOCK
     337        }
    338338}
    339339
    340340static inline void unlock( go_mutex & this ) with( this ) {
    341341        // if uncontended do atomic unlock and then return
    342     if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
     342        if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return;
    343343       
    344344        // otherwise threads are blocked so we must wake one
     
    384384
    385385static inline bool block( exp_backoff_then_block_lock & this ) with( this ) {
    386     lock( spinlock __cfaabi_dbg_ctx2 );
    387     if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
    388         unlock( spinlock );
    389         return true;
    390     }
    391     insert_last( blocked_threads, *active_thread() );
    392     unlock( spinlock );
     386        lock( spinlock __cfaabi_dbg_ctx2 );
     387        if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) {
     388                unlock( spinlock );
     389                return true;
     390        }
     391        insert_last( blocked_threads, *active_thread() );
     392        unlock( spinlock );
    393393        park( );
    394394        return true;
     
    415415
    416416static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) {
    417     if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
    418     lock( spinlock __cfaabi_dbg_ctx2 );
    419     thread$ * t = &try_pop_front( blocked_threads );
    420     unlock( spinlock );
    421     unpark( t );
     417        if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
     418        lock( spinlock __cfaabi_dbg_ctx2 );
     419        thread$ * t = &remove_first( blocked_threads );
     420        unlock( spinlock );
     421        unpark( t );
    422422}
    423423
     
    469469        lock( lock __cfaabi_dbg_ctx2 );
    470470        /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
    471         thread$ * t = &try_pop_front( blocked_threads );
     471        thread$ * t = &remove_first( blocked_threads );
    472472        held = ( t ? true : false );
    473473        unpark( t );
     
    476476
    477477static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) {
    478     lock( lock __cfaabi_dbg_ctx2 );
    479     insert_last( blocked_threads, *t );
    480     unlock( lock );
     478        lock( lock __cfaabi_dbg_ctx2 );
     479        insert_last( blocked_threads, *t );
     480        unlock( lock );
    481481}
    482482DEFAULT_ON_WAIT( fast_block_lock )
     
    521521
    522522        if ( owner != 0p ) {
    523         select_node node;
     523                select_node node;
    524524                insert_last( blocked_threads, node );
    525525                unlock( lock );
     
    533533
    534534static inline void pop_node( simple_owner_lock & this ) with( this ) {
    535     __handle_waituntil_OR( blocked_threads );
    536     select_node * node = &try_pop_front( blocked_threads );
    537     if ( node ) {
    538         owner = node->blocked_thread;
    539         recursion_count = 1;
    540         // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
    541         wake_one( blocked_threads, *node );
    542     } else {
    543         owner = 0p;
    544         recursion_count = 0;
    545     }
     535        __handle_waituntil_OR( blocked_threads );
     536        select_node * node = &remove_first( blocked_threads );
     537        if ( node ) {
     538                owner = node->blocked_thread;
     539                recursion_count = 1;
     540                // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread );
     541                wake_one( blocked_threads, *node );
     542        } else {
     543                owner = 0p;
     544                recursion_count = 0;
     545        }
    546546}
    547547
     
    582582        pop_node( this );
    583583
    584     select_node node;
    585     active_thread()->link_node = (void *)&node;
    586         unlock( lock );
    587 
    588     pre_park_then_park( pp_fn, pp_datum );
     584        select_node node;
     585        active_thread()->link_node = (void *)&node;
     586        unlock( lock );
     587
     588        pre_park_then_park( pp_fn, pp_datum );
    589589
    590590        return ret;
     
    595595// waituntil() support
    596596static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) {
    597     lock( lock __cfaabi_dbg_ctx2 );
    598 
    599     // check if we can complete operation. If so race to establish winner in special OR case
    600     if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
    601         if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
    602            unlock( lock );
    603            return false;
    604         }
    605     }
    606 
    607     if ( owner == active_thread() ) {
     597        lock( lock __cfaabi_dbg_ctx2 );
     598
     599        // check if we can complete operation. If so race to establish winner in special OR case
     600        if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
     601                if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering
     602                        unlock( lock );
     603                        return false;
     604                }
     605        }
     606
     607        if ( owner == active_thread() ) {
    608608                recursion_count++;
    609         if ( node.park_counter ) __make_select_node_available( node );
    610         unlock( lock );
     609                if ( node.park_counter ) __make_select_node_available( node );
     610                unlock( lock );
    611611                return true;
    612612        }
    613613
    614     if ( owner != 0p ) {
     614        if ( owner != 0p ) {
    615615                insert_last( blocked_threads, node );
    616616                unlock( lock );
    617617                return false;
    618618        }
    619    
     619       
    620620        owner = active_thread();
    621621        recursion_count = 1;
    622622
    623     if ( node.park_counter ) __make_select_node_available( node );
    624     unlock( lock );
    625     return true;
     623        if ( node.park_counter ) __make_select_node_available( node );
     624        unlock( lock );
     625        return true;
    626626}
    627627
    628628static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) {
    629     lock( lock __cfaabi_dbg_ctx2 );
    630     if ( node`isListed ) {
    631         remove( node );
    632         unlock( lock );
    633         return false;
    634     }
    635 
    636     if ( owner == active_thread() ) {
    637         recursion_count--;
    638         if ( recursion_count == 0 ) {
    639             pop_node( this );
    640         }
    641     }
    642     unlock( lock );
    643     return false;
     629        lock( lock __cfaabi_dbg_ctx2 );
     630        if ( isListed( node ) ) {
     631                remove( node );
     632                unlock( lock );
     633                return false;
     634        }
     635
     636        if ( owner == active_thread() ) {
     637                recursion_count--;
     638                if ( recursion_count == 0 ) {
     639                        pop_node( this );
     640                }
     641        }
     642        unlock( lock );
     643        return false;
    644644}
    645645
  • libcfa/src/concurrency/monitor.cfa

    rf85de47 r65bd3c2  
    99// Author           : Thierry Delisle
    1010// Created On       : Thd Feb 23 12:27:26 2017
    11 // Last Modified By : Kyoung Seo
    12 // Last Modified On : Thd Jan 16 12:59:00 2025
    13 // Update Count     : 73
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Fri Apr 25 07:20:22 2025
     13// Update Count     : 80
    1414//
    1515
     
    7878        __spinlock_t * locks[count];                                                    /* We need to pass-in an array of locks to BlockInternal */
    7979
    80 #define monitor_save    save  ( monitors, count, locks, recursions, masks )
     80#define monitor_save save ( monitors, count, locks, recursions, masks )
    8181#define monitor_restore restore( monitors, count, locks, recursions, masks )
    8282
     
    9595        if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
    9696                abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
    97         } else if ( !this->owner ) {
     97        } else if ( ! this->owner ) {
    9898                // No one has the monitor, just take it
    9999                __set_owner( this, thrd );
    100100
    101                 __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     101                __cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
    102102        } else if ( this->owner == thrd) {
    103103                // We already have the monitor, just note how many times we took it
    104104                this->recursion += 1;
    105105
    106                 __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     106                __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" );
    107107        } else if ( is_accepted( this, group) ) {
    108108                // Some one was waiting for us, enter
     
    112112                reset_mask( this );
    113113
    114                 __cfaabi_dbg_print_safe( "Kernel :  mon accepts \n" );
     114                __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" );
    115115        } else {
    116                 __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     116                __cfaabi_dbg_print_safe( "Kernel : blocking \n" );
    117117
    118118                // Some one else has the monitor, wait in line for it
     
    124124                park();
    125125
    126                 __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     126                __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
    127127
    128128                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     
    130130        }
    131131
    132         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     132        __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
    133133
    134134        /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     
    152152
    153153
    154         if ( !this->owner ) {
     154        if ( ! this->owner ) {
    155155                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
    156156
     
    159159
    160160                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    161                 /* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled );
     161                /* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled );
    162162
    163163                unlock( this->lock );
    164164                return;
    165         } else if ( this->owner == thrd && !join) {
     165        } else if ( this->owner == thrd && ! join) {
    166166                // We already have the monitor... but where about to destroy it so the nesting will fail
    167167                // Abort!
     
    179179
    180180                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    181                 /* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled );
     181                /* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled );
    182182
    183183                unlock( this->lock );
     
    186186
    187187        // The monitor is busy, if this is a thread and the thread owns itself, it better be active
    188         /* paranoid */ verify( !is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) );
     188        /* paranoid */ verify( ! is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) );
    189189
    190190        __lock_size_t count = 1;
     
    192192        __monitor_group_t group = { &this, 1, func };
    193193        if ( is_accepted( this, group) ) {
    194                 __cfaabi_dbg_print_safe( "Kernel :  mon accepts dtor, block and signal it \n" );
     194                __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" );
    195195
    196196                // Wake the thread that is waiting for this
     
    220220                return;
    221221        } else {
    222                 __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     222                __cfaabi_dbg_print_safe( "Kernel : blocking \n" );
    223223
    224224                wait_ctx( thrd, 0 )
     
    254254        // it means we don't need to do anything
    255255        if ( this->recursion != 0) {
    256                 __cfaabi_dbg_print_safe( "Kernel :  recursion still %d\n", this->recursion);
     256                __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion);
    257257                unlock( this->lock );
    258258                return;
     
    264264        // Check the new owner is consistent with who we wake-up
    265265        // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
    266         /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     266        /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    267267
    268268        // We can now let other threads in safely
     
    270270
    271271        //We need to wake-up the thread
    272         /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     272        /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    273273        unpark( new_owner );
    274274}
     
    280280                        abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner);
    281281                }
    282                 if ( this->recursion != 1  && !join ) {
     282                if ( this->recursion != 1 && ! join ) {
    283283                        abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1);
    284284                }
     
    317317
    318318        // Unpark the next owner if needed
    319         /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     319        /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    320320        /* paranoid */ verify( ! __preemption_enabled() );
    321321        /* paranoid */ verify( thrd->state == Halted );
     
    424424
    425425static void ?{}(__condition_criterion_t & this ) with( this ) {
    426         ready  = false;
     426        ready = false;
    427427        target = 0p;
    428         owner  = 0p;
    429         next  = 0p;
     428        owner = 0p;
     429        this.next = 0p;
    430430}
    431431
    432432static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
    433         this.ready  = false;
     433        this.ready = false;
    434434        this.target = target;
    435         this.owner  = &owner;
    436         this.next   = 0p;
     435        this.owner = &owner;
     436        this.next = 0p;
    437437}
    438438
     
    525525        for ( i; count ) {
    526526                __condition_criterion_t * crit = &node->criteria[i];
    527                 assert( !crit->ready );
     527                assert( ! crit->ready );
    528528                push( crit->target->signal_stack, crit );
    529529        }
     
    536536
    537537bool signal_block( condition & this ) libcfa_public {
    538         if ( !this.blocked.head ) { return false; }
     538        if ( ! this.blocked.head ) { return false; }
    539539
    540540        //Check that everything is as expected
     
    571571        // WE WOKE UP
    572572
    573         __cfaabi_dbg_print_buffer_local( "Kernel :   signal_block returned\n" );
     573        __cfaabi_dbg_print_buffer_local( "Kernel : signal_block returned\n" );
    574574
    575575        //We are back, restore the masks and recursions
     
    581581// Access the user_info of the thread waiting at the front of the queue
    582582uintptr_t front( condition & this ) libcfa_public {
    583         verifyf( !is_empty(this),
     583        verifyf( ! is_empty(this),
    584584                "Attempt to access user data on an empty condition.\n"
    585585                "Possible cause is not checking if the condition is empty before reading stored data."
     
    624624        {
    625625                // Check if the entry queue
    626                 thread$ * next; int index;
    627                 [next, index] = search_entry_queue( mask, monitors, count );
    628 
    629                 if ( next ) {
     626                thread$ * nxt; int index;
     627                [nxt, index] = search_entry_queue( mask, monitors, count );
     628
     629                if ( nxt ) {
    630630                        *mask.accepted = index;
    631631                        __acceptable_t& accepted = mask[index];
    632632                        if ( accepted.is_dtor ) {
    633633                                __cfaabi_dbg_print_buffer_local( "Kernel : dtor already there\n" );
    634                                 verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
     634                                verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );
    635635
    636636                                monitor$ * mon2dtor = accepted[0];
     
    651651                                monitor_save;
    652652
    653                                 __cfaabi_dbg_print_buffer_local( "Kernel :  baton of %"PRIdFAST16" monitors : ", count );
     653                                __cfaabi_dbg_print_buffer_local( "Kernel : baton of %"PRIdFAST16" monitors : ", count );
    654654                                #ifdef __CFA_DEBUG_PRINT__
    655655                                        for ( i; count ) {
     
    660660
    661661                                // Set the owners to be the next thread
    662                                 __set_owner( monitors, count, next );
     662                                __set_owner( monitors, count, nxt );
    663663
    664664                                // unlock all the monitors
     
    666666
    667667                                // unpark the thread we signalled
    668                                 unpark( next );
     668                                unpark( nxt );
    669669
    670670                                //Everything is ready to go to sleep
     
    741741        /* paranoid */ verify ( monitors[0]->lock.lock );
    742742        /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
    743         monitors[0]->owner        = owner;
    744         monitors[0]->recursion    = 1;
     743        monitors[0]->owner = owner;
     744        monitors[0]->recursion = 1;
    745745        for ( i; 1~count ) {
    746746                /* paranoid */ verify ( monitors[i]->lock.lock );
    747747                /* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] );
    748                 monitors[i]->owner        = owner;
    749                 monitors[i]->recursion    = 0;
     748                monitors[i]->owner = owner;
     749                monitors[i]->recursion = 0;
    750750        }
    751751}
     
    765765static inline thread$ * next_thread( monitor$ * this ) {
    766766        //Check the signaller stack
    767         __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     767        __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
    768768        __condition_criterion_t * urgent = pop( this->signal_stack );
    769769        if ( urgent ) {
     
    771771                //regardless of if we are ready to baton pass,
    772772                //we need to set the monitor as in use
    773                 /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    774                 __set_owner( this,  urgent->owner->waiting_thread );
     773                /* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     774                __set_owner( this, urgent->owner->waiting_thread );
    775775
    776776                return check_condition( urgent );
     
    780780        // Get the next thread in the entry_queue
    781781        thread$ * new_owner = pop_head( this->entry_queue );
    782         /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    783         /* paranoid */ verify( !new_owner || new_owner->user_link.next == 0p );
     782        /* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     783        /* paranoid */ verify( ! new_owner || new_owner->user_link.next == 0p );
    784784        __set_owner( this, new_owner );
    785785
     
    792792
    793793        // Check if there are any acceptable functions
    794         if ( !it ) return false;
     794        if ( ! it ) return false;
    795795
    796796        // If this isn't the first monitor to test this, there is no reason to repeat the test.
     
    820820        for ( i; count ) {
    821821                (criteria[i]){ monitors[i], waiter };
    822                 __cfaabi_dbg_print_safe( "Kernel :  target %p = %p\n", criteria[i].target, &criteria[i] );
     822                __cfaabi_dbg_print_safe( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] );
    823823                push( criteria[i].target->signal_stack, &criteria[i] );
    824824        }
     
    902902        }
    903903
    904         __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
     904        __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p );
    905905        return ready2run ? node->waiting_thread : 0p;
    906906}
     
    908908static inline void brand_condition( condition & this ) {
    909909        thread$ * thrd = active_thread();
    910         if ( !this.monitors ) {
     910        if ( ! this.monitors ) {
    911911                // __cfaabi_dbg_print_safe( "Branding\n" );
    912912                assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
     
    928928        for ( __acceptable_t * it = begin; it != end; it++, i++ ) {
    929929                #if defined( __CFA_WITH_VERIFY__ )
    930                 thread$ * last = 0p;
     930                thread$ * prior = 0p;
    931931                #endif // __CFA_WITH_VERIFY__
    932932
     
    934934                        thread$ * curr = *thrd_it;
    935935
    936                         /* paranoid */ verifyf( !last || last->user_link.next == curr, "search not making progress, from %p (%p) to %p",
    937                                                                         last, last->user_link.next, curr );
    938                         /* paranoid */ verifyf( curr != last, "search not making progress, from %p to %p", last, curr );
     936                        /* paranoid */ verifyf( ! prior || prior->user_link.next == curr, "search not making progress, from %p (%p) to %p",
     937                                                                        prior, prior->user_link.next, curr );
     938                        /* paranoid */ verifyf( curr != prior, "search not making progress, from %p to %p", prior, curr );
    939939
    940940                        // For each thread in the entry-queue check for a match
     
    945945
    946946                        #if defined( __CFA_WITH_VERIFY__ )
    947                         last = curr;
     947                        prior = curr;
    948948                        #endif
    949949                } // for
     
    10011001        if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) {
    10021002                abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this );
    1003         } else if ( !this->owner ) {
     1003        } else if ( ! this->owner ) {
    10041004                // No one has the monitor, just take it
    10051005                __set_owner( this, thrd );
    10061006
    1007                 __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     1007                __cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
    10081008        } else if ( this->owner == thrd) {
    10091009                // We already have the monitor, just note how many times we took it
    10101010                this->recursion += 1;
    10111011
    1012                 __cfaabi_dbg_print_safe( "Kernel :  mon already owned \n" );
     1012                __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" );
    10131013        } else {
    1014                 __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
     1014                __cfaabi_dbg_print_safe( "Kernel : blocking \n" );
    10151015
    10161016                // Someone else has the monitor, wait in line for it
     
    10221022                park();
    10231023
    1024                 __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     1024                __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
    10251025
    10261026                /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
     
    10281028        }
    10291029
    1030         __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     1030        __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
    10311031
    10321032        /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
  • libcfa/src/concurrency/preemption.cfa

    rf85de47 r65bd3c2  
    1010// Created On       : Mon Jun 5 14:20:42 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Jan  9 08:42:59 2023
    13 // Update Count     : 60
     12// Last Modified On : Fri Apr 25 07:24:39 2025
     13// Update Count     : 63
    1414//
    1515
     
    3939__attribute__((weak)) Duration default_preemption() libcfa_public {
    4040        const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
    41         if(!preempt_rate_s) {
     41        if ( !preempt_rate_s) {
    4242                __cfadbg_print_safe(preemption, "No CFA_DEFAULT_PREEMPTION in ENV\n");
    4343                return __CFA_DEFAULT_PREEMPTION__;
     
    4646        char * endptr = 0p;
    4747        long int preempt_rate_l = strtol(preempt_rate_s, &endptr, 10);
    48         if(preempt_rate_l < 0 || preempt_rate_l > 65535) {
     48        if (preempt_rate_l < 0 || preempt_rate_l > 65535) {
    4949                __cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION out of range : %ld\n", preempt_rate_l);
    5050                return __CFA_DEFAULT_PREEMPTION__;
    5151        }
    52         if('\0' != *endptr) {
     52        if ('\0' != *endptr) {
    5353                __cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION not a decimal number : %s\n", preempt_rate_s);
    5454                return __CFA_DEFAULT_PREEMPTION__;
     
    6464// FwdDeclarations : Signal handlers
    6565static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
    66 static void sigHandler_alarm    ( __CFA_SIGPARMS__ );
    67 static void sigHandler_segv     ( __CFA_SIGPARMS__ );
    68 static void sigHandler_ill      ( __CFA_SIGPARMS__ );
    69 static void sigHandler_fpe      ( __CFA_SIGPARMS__ );
    70 static void sigHandler_abort    ( __CFA_SIGPARMS__ );
     66static void sigHandler_alarm( __CFA_SIGPARMS__ );
     67static void sigHandler_segv( __CFA_SIGPARMS__ );
     68static void sigHandler_ill( __CFA_SIGPARMS__ );
     69static void sigHandler_fpe( __CFA_SIGPARMS__ );
     70static void sigHandler_abort( __CFA_SIGPARMS__ );
    7171
    7272// FwdDeclarations : alarm thread main
     
    8686#endif
    8787
    88 KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
    89 event_kernel_t * event_kernel;                        // kernel public handle to event kernel
    90 static pthread_t alarm_thread;                        // pthread handle to alarm thread
    91 static void * alarm_stack;                                                        // pthread stack for alarm thread
     88KERNEL_STORAGE(event_kernel_t, event_kernel);                   // private storage for event kernel
     89event_kernel_t * event_kernel;                                                  // kernel public handle to event kernel
     90static pthread_t alarm_thread;                                                  // pthread handle to alarm thread
     91static void * alarm_stack;                                                              // pthread stack for alarm thread
    9292
    9393static void ?{}(event_kernel_t & this) with( this ) {
     
    102102// Get next expired node
    103103static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
    104         if( ! & (*alarms)`first ) return 0p;                                            // If no alarms return null
    105         if( (*alarms)`first.deadline >= currtime ) return 0p;   // If alarms head not expired return null
    106         return pop(alarms);                                                                     // Otherwise just pop head
     104        if ( ! & first( *alarms ) ) return 0p;                            // If no alarms return null
     105        if ( first( *alarms ).deadline >= currtime ) return 0p; // If alarms head not expired return null
     106        return pop(alarms);                                                                       // Otherwise just pop head
    107107}
    108108
     
    117117                __cfadbg_print_buffer_decl( preemption, " KERNEL: preemption tick %lu\n", currtime.tn);
    118118                Duration period = node->period;
    119                 if( period == 0 ) {
    120                         node->set = false;                  // Node is one-shot, just mark it as not pending
     119                if ( period == 0 ) {
     120                        node->set = false;                                // Node is one-shot, just mark it as not pending
    121121                }
    122122
     
    125125
    126126                // Check if this is a kernel
    127                 if( node->type == Kernel ) {
     127                if ( node->type == Kernel ) {
    128128                        preempt( node->proc );
    129129                }
    130                 else if( node->type == User ) {
     130                else if ( node->type == User ) {
    131131                        __cfadbg_print_buffer_local( preemption, " KERNEL: alarm unparking %p.\n", node->thrd );
    132132                        timeout( node->thrd );
     
    137137
    138138                // Check if this is a periodic alarm
    139                 if( period > 0 ) {
     139                if ( period > 0 ) {
    140140                        __cfadbg_print_buffer_local( preemption, " KERNEL: alarm period is %lu.\n", period`ns );
    141141                        node->deadline = currtime + period;  // Alarm is periodic, add currtime to it (used cached current time)
    142                         insert( alarms, node );            // Reinsert the node for the next time it triggers
     142                        insert( alarms, node );                  // Reinsert the node for the next time it triggers
    143143                }
    144144        }
    145145
    146146        // If there are still alarms pending, reset the timer
    147         if( & (*alarms)`first ) {
    148                 Duration delta = (*alarms)`first.deadline - currtime;
     147        if ( & first( *alarms ) ) {
     148                Duration delta = first( *alarms ).deadline - currtime;
    149149                __kernel_set_timer( delta );
    150150        }
     
    283283                __attribute__((unused)) unsigned short new_val = disable_count + 1;
    284284                disable_count = new_val;
    285                 verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
     285                verify( new_val < 65_000u );                      // If this triggers someone is disabling interrupts without enabling them
    286286        }
    287287
     
    301301
    302302                        // Check if we need to preempt the thread because an interrupt was missed
    303                         if( prev == 1 ) {
     303                        if ( prev == 1 ) {
    304304                                #if GCC_VERSION > 50000
    305305                                        static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
     
    313313                                // Signal the compiler that a fence is needed but only for signal handlers
    314314                                __atomic_signal_fence(__ATOMIC_RELEASE);
    315                                 if( poll && proc->pending_preemption ) {
     315                                if ( poll && proc->pending_preemption ) {
    316316                                        proc->pending_preemption = false;
    317317                                        force_yield( __POLL_PREEMPTION );
     
    334334                // Signal the compiler that a fence is needed but only for signal handlers
    335335                __atomic_signal_fence(__ATOMIC_RELEASE);
    336                 if( unlikely( proc->pending_preemption ) ) {
     336                if ( unlikely( proc->pending_preemption ) ) {
    337337                        proc->pending_preemption = false;
    338338                        force_yield( __POLL_PREEMPTION );
     
    347347void __cfaabi_check_preemption() libcfa_public {
    348348        bool ready = __preemption_enabled();
    349         if(!ready) { abort("Preemption should be ready"); }
     349        if ( !ready) { abort("Preemption should be ready"); }
    350350
    351351        sigset_t oldset;
    352352        int ret;
    353353        ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    354         if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
     354        if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    355355
    356356        ret = sigismember(&oldset, SIGUSR1);
    357         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    358         if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
     357        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     358        if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
    359359
    360360        ret = sigismember(&oldset, SIGALRM);
    361         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    362         if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
     361        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     362        if (ret == 0) { abort("ERROR SIGALRM is enabled"); }
    363363
    364364        ret = sigismember(&oldset, SIGTERM);
    365         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    366         if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
     365        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     366        if (ret == 1) { abort("ERROR SIGTERM is disabled"); }
    367367}
    368368
     
    385385
    386386        if ( __cfaabi_pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {
    387             abort( "internal error, pthread_sigmask" );
     387                abort( "internal error, pthread_sigmask" );
    388388        }
    389389}
     
    415415        int ret;
    416416        ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    417         if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
     417        if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    418418
    419419        ret = sigismember(&oldset, SIGUSR1);
    420         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    421         if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
     420        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     421        if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
    422422
    423423        ret = sigismember(&oldset, SIGALRM);
    424         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    425         if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
     424        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     425        if (ret == 0) { abort("ERROR SIGALRM is enabled"); }
    426426
    427427        signal_block( SIGUSR1 );
     
    434434        int ret;
    435435        ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset);  // workaround trac#208: cast should be unnecessary
    436         if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
     436        if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
    437437
    438438        ret = sigismember(&oldset, SIGUSR1);
    439         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    440         if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
     439        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     440        if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
    441441
    442442        ret = sigismember(&oldset, SIGALRM);
    443         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
    444         if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
     443        if (ret <  0) { abort("ERROR sigismember returned %d", ret); }
     444        if (ret == 0) { abort("ERROR SIGALRM is enabled"); }
    445445}
    446446
     
    453453        // Check if preemption is safe
    454454        bool ready = true;
    455         if( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; };
    456         if( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; };
    457 
    458         if( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
    459         if( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
     455        if ( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; };
     456        if ( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; };
     457
     458        if ( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };
     459        if ( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };
    460460
    461461EXIT:
     
    484484        // Setup proper signal handlers
    485485        __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); // __cfactx_switch handler
    486         __cfaabi_sigaction( SIGALRM, sigHandler_alarm    , SA_SIGINFO ); // debug handler
     486        __cfaabi_sigaction( SIGALRM, sigHandler_alarm , SA_SIGINFO ); // debug handler
    487487
    488488        signal_block( SIGALRM );
     
    551551        // before the kernel thread has even started running. When that happens, an interrupt
    552552        // with a null 'this_processor' will be caught, just ignore it.
    553         if(! __cfaabi_tls.this_processor ) return;
     553        if ( ! __cfaabi_tls.this_processor ) return;
    554554
    555555        choose(sfp->si_value.sival_int) {
    556                 case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    557                 case PREEMPT_IO       : ;// I/O asked to stop spinning, nothing to do here
     556                case PREEMPT_NORMAL: ;                                                  // Normal case, nothing to do here
     557                case PREEMPT_IO: ;                                                              // I/O asked to stop spinning, nothing to do here
    558558                case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
    559559                default:
     
    562562
    563563        // Check if it is safe to preempt here
    564         if( !preemption_ready( ip ) ) {
     564        if ( !preemption_ready( ip ) ) {
    565565                #if !defined(__CFA_NO_STATISTICS__)
    566566                        __cfaabi_tls.this_stats->ready.threads.preempt.rllfwd++;
     
    607607        sigfillset(&mask);
    608608        if ( __cfaabi_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
    609             abort( "internal error, pthread_sigmask" );
     609                abort( "internal error, pthread_sigmask" );
    610610        }
    611611
     
    622622                __cfadbg_print_buffer_local( preemption, " KERNEL: SI_QUEUE %d, SI_TIMER %d, SI_KERNEL %d\n", SI_QUEUE, SI_TIMER, SI_KERNEL );
    623623
    624                 if( sig < 0 ) {
     624                if ( sig < 0 ) {
    625625                        //Error!
    626626                        int err = errno;
  • libcfa/src/concurrency/pthread.cfa

    rf85de47 r65bd3c2  
    99// Author           : Zhenyan Zhu
    1010// Created On       : Sat Aug 6 16:29:18 2022
    11 // Last Modified By : Kyoung Seo
    12 // Last Modified On : Mon Jan 27 20:35:00 2025
    13 // Update Count     : 1
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Fri Apr 25 07:28:01 2025
     13// Update Count     : 4
    1414//
    1515
     
    4040        bool in_use;
    4141        void (* destructor)( void * );
    42     dlist( pthread_values ) threads;
     42        dlist( pthread_values ) threads;
    4343};
    4444
     
    543543                //      p.in_use = false;
    544544                // }
    545         pthread_values * p = &try_pop_front( cfa_pthread_keys[key].threads );
    546         for ( ; p; ) {           
    547             p->in_use = false;
    548             p = &try_pop_front( cfa_pthread_keys[key].threads );
    549         }
     545                for ( pthread_values * p = &remove_first( cfa_pthread_keys[key].threads ); p; p = &remove_first( cfa_pthread_keys[key].threads ) ) {
     546                        p->in_use = false;
     547                }
    550548                unlock(key_lock);
    551549                return 0;
     
    603601        //######################### Parallelism #########################
    604602        void pthread_delete_kernel_threads_() __THROW { // see uMain::~uMain
    605                 Pthread_kernel_threads * p = &try_pop_front(cfa_pthreads_kernel_threads);
    606                 for ( ; p; ) {
    607             delete(p);
    608                         p = &try_pop_front(cfa_pthreads_kernel_threads);
     603               
     604                for ( Pthread_kernel_threads * p = &remove_first(cfa_pthreads_kernel_threads); p; p = &remove_first(cfa_pthreads_kernel_threads) ) {
     605                        delete(p);
    609606                } // for
    610607        } // pthread_delete_kernel_threads_
     
    626623                } // for
    627624                for ( ; new_level < cfa_pthreads_no_kernel_threads; cfa_pthreads_no_kernel_threads -= 1 ) { // remove processors ?
    628                         delete(&try_pop_front(cfa_pthreads_kernel_threads));
     625                        delete(&remove_first(cfa_pthreads_kernel_threads));
    629626                } // for
    630627                unlock( concurrency_lock );
  • libcfa/src/concurrency/select.hfa

    rf85de47 r65bd3c2  
    1010// Author           : Colby Alexander Parsons
    1111// Created On       : Thu Jan 21 19:46:50 2023
    12 // Last Modified By : Kyoung Seo
    13 // Last Modified On : Wed Mar 19 12:00:00 2025
    14 // Update Count     : 1
     12// Last Modified By : Peter A. Buhr
     13// Last Modified On : Fri Apr 25 07:31:26 2025
     14// Update Count     : 5
    1515//
    1616
     
    3333static inline bool __CFA_has_clause_run( unsigned long int status ) { return status == __SELECT_RUN; }
    3434static inline void __CFA_maybe_park( int * park_counter ) {
    35     if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 )
    36         park();
     35        if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 )
     36                park();
    3737}
    3838
    3939// node used for coordinating waituntil synchronization
    4040struct select_node {
    41     int * park_counter;                // If this is 0p then the node is in a special OR case waituntil
    42     unsigned long int * clause_status;  // needs to point at ptr sized location, if this is 0p then node is not part of a waituntil
    43 
    44     void * extra;                       // used to store arbitrary data needed by some primitives
    45 
    46     thread$ * blocked_thread;
    47     inline dlink(select_node);
     41        int * park_counter;                              // If this is 0p then the node is in a special OR case waituntil
     42        unsigned long int * clause_status;  // needs to point at ptr sized location, if this is 0p then node is not part of a waituntil
     43
     44        void * extra;                                      // used to store arbitrary data needed by some primitives
     45
     46        thread$ * blocked_thread;
     47        inline dlink(select_node);
    4848};
    4949P9_EMBEDDED( select_node, dlink(select_node) )
    5050
    5151static inline void ?{}( select_node & this ) {
    52     this.blocked_thread = active_thread();
    53     this.clause_status = 0p;
    54     this.park_counter = 0p;
    55     this.extra = 0p;
     52        this.blocked_thread = active_thread();
     53        this.clause_status = 0p;
     54        this.park_counter = 0p;
     55        this.extra = 0p;
    5656}
    5757
    5858static inline void ?{}( select_node & this, thread$ * blocked_thread ) {
    59     this.blocked_thread = blocked_thread;
    60     this.clause_status = 0p;
    61     this.park_counter = 0p;
    62     this.extra = 0p;
     59        this.blocked_thread = blocked_thread;
     60        this.clause_status = 0p;
     61        this.park_counter = 0p;
     62        this.extra = 0p;
    6363}
    6464
    6565static inline void ?{}( select_node & this, thread$ * blocked_thread, void * extra ) {
    66     this.blocked_thread = blocked_thread;
    67     this.clause_status = 0p;
    68     this.park_counter = 0p;
    69     this.extra = extra;
     66        this.blocked_thread = blocked_thread;
     67        this.clause_status = 0p;
     68        this.park_counter = 0p;
     69        this.extra = extra;
    7070}
    7171static inline void ^?{}( select_node & this ) {}
     
    7676// this is used inside the compiler to attempt to establish an else clause as a winner in the OR special case race
    7777static inline bool __select_node_else_race( select_node & this ) with( this ) {
    78     unsigned long int cmp_status = __SELECT_UNSAT;
    79     return *clause_status == 0
    80             && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
     78        unsigned long int cmp_status = __SELECT_UNSAT;
     79        return *clause_status == 0
     80                        && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST );
    8181}
    8282
     
    8585forall(T & | sized(T))
    8686trait is_selectable {
    87     // For registering a select stmt on a selectable concurrency primitive
    88     // Returns bool that indicates if operation is already SAT
    89     bool register_select( T &, select_node & );
    90 
    91     // For unregistering a select stmt on a selectable concurrency primitive
    92     // If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN)
    93     bool unregister_select( T &, select_node & );
    94 
    95     // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node
    96     //    passed as an arg to this routine. If true is returned proceed as normal, if false is returned the statement is skipped
    97     bool on_selected( T &, select_node & );
     87        // For registering a select stmt on a selectable concurrency primitive
     88        // Returns bool that indicates if operation is already SAT
     89        bool register_select( T &, select_node & );
     90
     91        // For unregistering a select stmt on a selectable concurrency primitive
     92        // If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN)
     93        bool unregister_select( T &, select_node & );
     94
     95        // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node
     96        //      passed as an arg to this routine. If true is returned proceed as normal, if false is returned the statement is skipped
     97        bool on_selected( T &, select_node & );
    9898};
    9999// Used inside the compiler to allow for overloading on return type for operations such as '?<<?' for channels
     
    107107
    108108static inline void __make_select_node_unsat( select_node & this ) with( this ) {
    109     __atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST );
     109        __atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST );
    110110}
    111111static inline void __make_select_node_sat( select_node & this ) with( this ) {
    112     __atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST );
     112        __atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST );
    113113}
    114114
    115115// used for the 2-stage avail needed by the special OR case
    116116static inline bool __mark_select_node( select_node & this, unsigned long int val ) with( this ) {
    117     /* paranoid */ verify( park_counter == 0p );
    118     /* paranoid */ verify( clause_status != 0p );
    119 
    120     unsigned long int cmp_status = __SELECT_UNSAT;
    121     while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
    122         if ( cmp_status != __SELECT_PENDING ) return false;
    123         cmp_status = __SELECT_UNSAT;
    124     }
    125     return true;
     117        /* paranoid */ verify( park_counter == 0p );
     118        /* paranoid */ verify( clause_status != 0p );
     119
     120        unsigned long int cmp_status = __SELECT_UNSAT;
     121        while( ! __atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
     122                if ( cmp_status != __SELECT_PENDING ) return false;
     123                cmp_status = __SELECT_UNSAT;
     124        }
     125        return true;
    126126}
    127127
    128128// used for the 2-stage avail by the thread who owns a pending node
    129129static inline bool __pending_set_other( select_node & other, select_node & mine, unsigned long int val ) with( other ) {
    130     /* paranoid */ verify( park_counter == 0p );
    131     /* paranoid */ verify( clause_status != 0p );
    132 
    133     unsigned long int cmp_status = __SELECT_UNSAT;
    134     while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
    135         if ( cmp_status != __SELECT_PENDING )
    136             return false;
    137 
    138         // toggle current status flag to avoid starvation/deadlock
    139         __make_select_node_unsat( mine );
    140         cmp_status = __SELECT_UNSAT;
    141         if ( !__atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
    142             return false;
    143         cmp_status = __SELECT_UNSAT;
    144     }
    145     return true;
     130        /* paranoid */ verify( park_counter == 0p );
     131        /* paranoid */ verify( clause_status != 0p );
     132
     133        unsigned long int cmp_status = __SELECT_UNSAT;
     134        while( ! __atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
     135                if ( cmp_status != __SELECT_PENDING )
     136                        return false;
     137
     138                // toggle current status flag to avoid starvation/deadlock
     139                __make_select_node_unsat( mine );
     140                cmp_status = __SELECT_UNSAT;
     141                if ( ! __atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
     142                        return false;
     143                cmp_status = __SELECT_UNSAT;
     144        }
     145        return true;
    146146}
    147147
    148148static inline bool __make_select_node_pending( select_node & this ) with( this ) {
    149     return __mark_select_node( this, __SELECT_PENDING );
     149        return __mark_select_node( this, __SELECT_PENDING );
    150150}
    151151
     
    153153// return true if we want to unpark the thd
    154154static inline bool __make_select_node_available( select_node & this ) with( this ) {
    155     /* paranoid */ verify( clause_status != 0p );
    156     if( !park_counter )
    157         return __mark_select_node( this, (unsigned long int)&this );
    158 
    159     unsigned long int cmp_status = __SELECT_UNSAT;
    160 
    161     return *clause_status == 0 // C_TODO might not need a cmp_xchg in non special OR case
    162         && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write
    163         && !__atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST);
     155        /* paranoid */ verify( clause_status != 0p );
     156        if ( ! park_counter )
     157                return __mark_select_node( this, (unsigned long int)&this );
     158
     159        unsigned long int cmp_status = __SELECT_UNSAT;
     160
     161        return *clause_status == 0 // C_TODO might not need a cmp_xchg in non special OR case
     162                && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write
     163                && ! __atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST);
    164164}
    165165
    166166// Handles the special OR case of the waituntil statement
    167167// Since only one select node can win in the OR case, we need to race to set the node available BEFORE
    168 //    performing the operation since if we lose the race the operation should not be performed as it will be lost
     168// performing the operation since if we lose the race the operation should not be performed as it will be lost
    169169// Returns true if execution can continue normally and false if the queue has now been drained
    170170static inline bool __handle_waituntil_OR( dlist( select_node ) & queue ) {
    171     if ( queue`isEmpty ) return false;
    172     if ( queue`first.clause_status && !queue`first.park_counter ) {
    173         while ( !queue`isEmpty ) {
    174             // if node not a special OR case or if we win the special OR case race break
    175             if ( !queue`first.clause_status || queue`first.park_counter || __make_select_node_available( queue`first ) )
    176                 return true;
    177             // otherwise we lost the special OR race so discard node
    178             try_pop_front( queue );
    179         }
    180         return false;
    181     }
    182     return true;
     171        if ( isEmpty( queue ) ) return false;
     172        if ( first( queue ).clause_status && ! first( queue ).park_counter ) {
     173                while ( ! isEmpty( queue ) ) {
     174                        // if node not a special OR case or if we win the special OR case race break
     175                        if ( ! first( queue ).clause_status || first( queue ).park_counter || __make_select_node_available( first( queue ) ) )
     176                                return true;
     177                        // otherwise we lost the special OR race so discard node
     178                        remove_first( queue );
     179                }
     180                return false;
     181        }
     182        return true;
    183183}
    184184
    185185// wake one thread from the list
    186186static inline void wake_one( dlist( select_node ) & /*queue*/, select_node & popped ) {
    187     if ( !popped.clause_status                              // normal case, node is not a select node
    188         || ( popped.clause_status && !popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available
    189         || __make_select_node_available( popped ) )        // check if popped link belongs to a selecting thread
    190         unpark( popped.blocked_thread );
    191 }
    192 
    193 static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, try_pop_front( queue ) ); }
     187        if ( ! popped.clause_status                                                       // normal case, node is not a select node
     188                || ( popped.clause_status && ! popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available
     189                || __make_select_node_available( popped ) )              // check if popped link belongs to a selecting thread
     190                unpark( popped.blocked_thread );
     191}
     192
     193static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, remove_first( queue ) ); }
    194194
    195195static inline void setup_clause( select_node & this, unsigned long int * clause_status, int * park_counter ) {
    196     this.blocked_thread = active_thread();
    197     this.clause_status = clause_status;
    198     this.park_counter = park_counter;
     196        this.blocked_thread = active_thread();
     197        this.clause_status = clause_status;
     198        this.park_counter = park_counter;
    199199}
    200200
    201201// waituntil ( timeout( ... ) ) support
    202202struct select_timeout_node {
    203     alarm_node_t a_node;
    204     select_node * s_node;
     203        alarm_node_t a_node;
     204        select_node * s_node;
    205205};
    206206void ?{}( select_timeout_node & this, Duration duration, Alarm_Callback callback );
  • libcfa/src/executor.cfa

    rf85de47 r65bd3c2  
    2121        T * remove( Buffer(T, TLink) & mutex buf ) with(buf) {
    2222                dlist( T, TLink ) * qptr = &queue;                              // workaround https://cforall.uwaterloo.ca/trac/ticket/166
    23                 // if ( (*qptr)`isEmpty ) wait( delay );                // no request to process ? => wait
    24           if ( (*qptr)`isEmpty ) return 0p;                                     // no request to process ? => wait
     23                // if ( isEmpty( *qptr ) ) wait( delay );               // no request to process ? => wait
     24          if ( isEmpty( *qptr ) ) return 0p;                            // no request to process ? => wait
    2525                return &try_pop_front( *qptr );
    2626        } // remove
     
    9393        unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
    9494//      for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
    95     for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
    96             range = reqPerWorker + ( i < extras ? 1 : 0 );
     95        for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
     96                range = reqPerWorker + ( i < extras ? 1 : 0 );
    9797                workers[i] = new( cluster, requests, start, range );
    9898        } // for
  • src/Common/ScopedMap.hpp

    rf85de47 r65bd3c2  
    1010// Created On       : Wed Dec 2 11:37:00 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Tue Feb 15 08:41:28 2022
    13 // Update Count     : 5
     12// Last Modified On : Mon May 13 07:32:09 2024
     13// Update Count     : 6
    1414//
    1515
     
    3131class ScopedMap {
    3232        typedef std::map< Key, Value > MapType;
     33public:
    3334        struct Scope {
    3435                MapType map;
     
    4445                Scope & operator= (Scope &&) = default;
    4546        };
     47  private:
    4648        typedef std::vector< Scope > ScopeList;
    4749
  • src/Common/SemanticError.hpp

    rf85de47 r65bd3c2  
    1010// Created On       : Mon May 18 07:44:20 2015
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Sun Dec 15 21:04:32 2024
    13 // Update Count     : 77
     12// Last Modified On : Tue Apr  1 11:11:33 2025
     13// Update Count     : 79
    1414//
    1515
     
    6565        {"gcc-attributes"           , Severity::Warn, "invalid attribute: %s." },
    6666        {"c++-like-copy"            , Severity::Warn, "Constructor from reference is not a valid copy constructor." },
    67         {"depreciated-trait-syntax" , Severity::Warn, "trait type-parameters are now specified using the forall clause." },
     67        {"depreciated-trait-syntax" , Severity::Warn, "trait type-parameters, trait name(T,U), now specified using forall clause, forall(T,U) trait name." },
    6868};
    6969
  • tests/collections/atomic_mpsc.cfa

    rf85de47 r65bd3c2  
    1313void ?{}(some_node & this) { this.next = 0p; }
    1414
    15 static inline some_node * volatile & ?`next ( some_node * node ) {
     15static inline some_node * volatile & next( some_node * node ) {
    1616        return node->next;
    1717}
  • tests/list/dlist-insert-remove.cfa

    rf85de47 r65bd3c2  
    5959        do {
    6060                sout | f.adatum;
    61         } while (f`moveNext);
     61        } while (advance( f ));
    6262}
    6363
     
    6666        do {
    6767                sout | f.adatum;
    68         } while (f`movePrev);
     68        } while (recede( f ));
    6969}
    7070
     
    9494        do {
    9595                sout | f.adatum;
    96         } while (f`moveNext);
     96        } while (advance( f ));
    9797}
    9898
     
    101101        do {
    102102                sout | f.adatum;
    103         } while (f`movePrev);
     103        } while (recede( f ));
    104104}
    105105
     
    127127        do {
    128128                sout | m.anotherdatum;
    129         } while (m`moveNext);
     129        } while (advance( m ));
    130130}
    131131
     
    133133        do {
    134134                sout | m.anotherdatum;
    135         } while (m`movePrev);
     135        } while (recede( m ));
    136136}
    137137
     
    423423        dlist(fred, fred.mine) lf;
    424424
    425         assert( & lf`first == 0p );
    426         assert( & lf`last == 0p );
     425        assert( &first( lf ) == 0p );
     426        assert( &last( lf ) == 0p );
    427427
    428428        insert_first(lf, f1);
    429429
    430         assert( & lf`first == & f1 );
    431         assert( & lf`last  == & f1 );
     430        assert( &first( lf ) == &f1 );
     431        assert( &last( lf )  == &f1 );
    432432
    433433        verify(validate(lf));
     
    440440        printYourFreddies(f1, f2, 0);   // 3.14; 3.14; 0.5; 0.5 (unmodified)
    441441
    442         assert( & lf`first == & f1 );
    443         assert( & lf`last  == & f2 );
     442        assert( &first( lf ) == &f1 );
     443        assert( &last( lf )  == &f2 );
    444444}
    445445
     
    451451        dlist(fred, fred.yours) lf;
    452452
    453         assert( & lf`first == 0p );
    454         assert( & lf`last == 0p );
     453        assert( &first( lf ) == 0p );
     454        assert( &last( lf ) == 0p );
    455455
    456456        insert_first(lf, f1);
    457457
    458         assert( & lf`first == & f1 );
    459         assert( & lf`last  == & f1 );
     458        assert( &first( lf ) == & f1 );
     459        assert( &last( lf )  == & f1 );
    460460
    461461        verify(validate(lf));
     
    468468        printYourFreddies(f1, f2, 0);   // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified)
    469469
    470         assert( & lf`first == & f1 );
    471         assert( & lf`last  == & f2 );
     470        assert( &first( lf ) == & f1 );
     471        assert( &last( lf )  == & f2 );
    472472}
    473473
     
    479479        dlist(mary) lm;
    480480
    481         assert( & lm`first == 0p );
    482         assert( & lm`last == 0p );
     481        assert( &first( lm ) == 0p );
     482        assert( &last( lm ) == 0p );
    483483
    484484        insert_first(lm, m1);
    485485
    486         assert( & lm`first == & m1 );
    487         assert( & lm`last  == & m1 );
     486        assert( &first( lm ) == & m1 );
     487        assert( &last( lm )  == & m1 );
    488488
    489489        verify(validate(lm));
     
    495495        printMariatheotokos(m1, m2, 0); // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified)
    496496
    497         assert( & lm`first == & m1 );
    498         assert( & lm`last  == & m2 );
     497        assert( &first( lm ) == & m1 );
     498        assert( &last( lm )  == & m2 );
    499499}
    500500
     
    516516        dlist(fred, fred.mine) lf;
    517517
    518         assert( & lf`first == 0p );
    519         assert( & lf`last == 0p );
     518        assert( &first( lf ) == 0p );
     519        assert( &last( lf ) == 0p );
    520520
    521521        insert_last(lf, f2);
    522522
    523         assert( & lf`first == & f2 );
    524         assert( & lf`last  == & f2 );
     523        assert( &first( lf ) == & f2 );
     524        assert( &last( lf )  == & f2 );
    525525
    526526        verify(validate(lf));
     
    533533        printYourFreddies(f1, f2, 0);   // 3.14; 3.14; 0.5; 0.5 (unmodified)
    534534
    535         assert( & lf`first == & f1 );
    536         assert( & lf`last  == & f2 );
     535        assert( &first( lf ) == & f1 );
     536        assert( &last( lf )  == & f2 );
    537537}
    538538
     
    544544        dlist(fred, fred.yours) lf;
    545545
    546         assert( & lf`first == 0p );
    547         assert( & lf`last == 0p );
     546        assert( &first( lf ) == 0p );
     547        assert( &last( lf ) == 0p );
    548548
    549549        insert_last(lf, f2);
    550550
    551         assert( & lf`first == & f2 );
    552         assert( & lf`last  == & f2 );
     551        assert( &first( lf ) == & f2 );
     552        assert( &last( lf )  == & f2 );
    553553
    554554        verify(validate(lf));
     
    561561        printYourFreddies(f1, f2, 0);   // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified)
    562562
    563         assert( & lf`first == & f1 );
    564         assert( & lf`last  == & f2 );
     563        assert( &first( lf ) == & f1 );
     564        assert( &last( lf )  == & f2 );
    565565}
    566566
     
    572572        dlist(mary) lm;
    573573
    574         assert( & lm`first == 0p );
    575         assert( & lm`last == 0p );
     574        assert( &first( lm ) == 0p );
     575        assert( &last( lm ) == 0p );
    576576
    577577        insert_last(lm, m2);
    578578
    579         assert( & lm`first == & m2 );
    580         assert( & lm`last  == & m2 );
     579        assert( &first( lm ) == & m2 );
     580        assert( &last( lm )  == & m2 );
    581581
    582582        verify(validate(lm));
     
    588588        printMariatheotokos(m1, m2, 0); // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified)
    589589
    590         assert( & lm`first == & m1 );
    591         assert( & lm`last  == & m2 );
     590        assert( &first( lm ) == & m1 );
     591        assert( &last( lm )  == & m2 );
    592592}
    593593#if 0
     
    891891        insert_last(fly, f3);
    892892
    893         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    894         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     893        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     894        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    895895
    896896        verify(validate(fly));
     
    902902        verify(validate(flm));
    903903
    904         printMyFreddies(flm`first, flm`last, 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
    905         printYourFreddies(fly`first, fly`last, 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     904        printMyFreddies(first( flm ), last( flm ), 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
     905        printYourFreddies(first( fly ), last( fly ), 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    906906
    907907        // observe f1 is now solo in mine; in yours, it was just traversed
     
    930930        insert_last(fly, f3);
    931931
    932         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    933         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     932        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     933        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    934934
    935935        verify(validate(fly));
     
    941941        verify(validate(flm));
    942942
    943         printMyFreddies(flm`first, flm`last, 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    944         printYourFreddies(fly`first, fly`last, 0);   // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
     943        printMyFreddies(first( flm ), last( flm ), 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     944        printYourFreddies(first( fly ), last( fly ), 0);   // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
    945945
    946946        // observe f1 is now solo in yours; in mine, it was just traversed
     
    963963        insert_last(ml, m3);
    964964
    965         printMariatheotokos(ml`first, ml`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     965        printMariatheotokos(first( ml ), last( ml ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    966966
    967967        verify(validate(ml));
     
    971971        verify(validate(ml));
    972972
    973         printMariatheotokos(ml`first, ml`last, 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
     973        printMariatheotokos(first( ml ), last( ml ), 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
    974974
    975975        // observe m1 is now solo
     
    10071007        insert_last(fly, f3);
    10081008
    1009         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1010         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1009        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1010        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    10111011
    10121012        verify(validate(fly));
     
    10181018        verify(validate(flm));
    10191019
    1020         printMyFreddies(flm`first, flm`last, 0);     // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
    1021         printYourFreddies(fly`first, fly`last, 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     1020        printMyFreddies(first( flm ), last( flm ), 0);     // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
     1021        printYourFreddies(first( fly ), last( fly ), 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    10221022
    10231023        // observe f3 is now solo in mine; in yours, it was just traversed
     
    10451045        insert_last(fly, f3);
    10461046
    1047         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1048         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1047        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1048        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    10491049
    10501050        verify(validate(fly));
     
    10561056        verify(validate(flm));
    10571057
    1058         printMyFreddies(flm`first, flm`last, 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    1059         printYourFreddies(fly`first, fly`last, 0);   // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
     1058        printMyFreddies(first( flm ), last( flm ), 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     1059        printYourFreddies(first( fly ), last( fly ), 0);   // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
    10601060
    10611061        // observe f3 is now solo in yours; in mine, it was just traversed
     
    10781078        insert_last(ml, m3);
    10791079
    1080         printMariatheotokos(ml`first, ml`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1080        printMariatheotokos(first( ml ), last( ml ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    10811081
    10821082        verify(validate(ml));
     
    10861086        verify(validate(ml));
    10871087
    1088         printMariatheotokos(ml`first, ml`last, 0);     // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
     1088        printMariatheotokos(first( ml ), last( ml ), 0);     // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
    10891089
    10901090        // observe m3 is now solo
     
    11161116        insert_last(fly, f);
    11171117
    1118         printMyFreddies(flm`first, flm`last, 1);     // 0.7; 0.7; 0.7; 0.7
    1119         printYourFreddies(fly`first, fly`last, 1);   // 0.7; 0.7; 0.7; 0.7
     1118        printMyFreddies(first( flm ), last( flm ), 1);     // 0.7; 0.7; 0.7; 0.7
     1119        printYourFreddies(first( fly ), last( fly ), 1);   // 0.7; 0.7; 0.7; 0.7
    11201120
    11211121        verify(validate(fly));
     
    11271127        verify(validate(flm));
    11281128
    1129         assert( & flm`first == 0p );
    1130         assert( & flm`last == 0p );
    1131 
    1132         printYourFreddies(fly`first, fly`last, 0);   // 0.7; 0.7; 0.7; 0.7 (unmodified)
     1129        assert( &first( flm ) == 0p );
     1130        assert( &last( flm ) == 0p );
     1131
     1132        printYourFreddies(first( fly ), last( fly ), 0);   // 0.7; 0.7; 0.7; 0.7 (unmodified)
    11331133
    11341134        // observe f is solo in mine (now unlisted); in yours, it was just traversed
     
    11421142        verify(validate(fly));
    11431143        verify(validate(flm));
    1144         printMyFreddies(flm`first, flm`last, 0);     // 0.7; 0.7; 0.7; 0.7
     1144        printMyFreddies(first( flm ), last( flm ), 0);     // 0.7; 0.7; 0.7; 0.7
    11451145}
    11461146
     
    11551155        insert_last(fly, f);
    11561156
    1157         printMyFreddies(flm`first, flm`last, 1);     // 0.7; 0.7; 0.7; 0.7
    1158         printYourFreddies(fly`first, fly`last, 1);   // 0.7; 0.7; 0.7; 0.7
     1157        printMyFreddies(first( flm ), last( flm ), 1);     // 0.7; 0.7; 0.7; 0.7
     1158        printYourFreddies(first( fly ), last( fly ), 1);   // 0.7; 0.7; 0.7; 0.7
    11591159
    11601160        verify(validate(fly));
     
    11661166        verify(validate(flm));
    11671167
    1168         assert( & fly`first == 0p );
    1169         assert( & fly`last == 0p );
    1170 
    1171         printYourFreddies(flm`first, flm`last, 0);   // 0.7; 0.7; 0.7; 0.7 (unmodified)
     1168        assert( &first( fly ) == 0p );
     1169        assert( &last( fly ) == 0p );
     1170
     1171        printYourFreddies(first( flm ), last( flm ), 0);   // 0.7; 0.7; 0.7; 0.7 (unmodified)
    11721172
    11731173        // observe f is solo in yours (now unlisted); in mine, it was just traversed
     
    11811181        verify(validate(fly));
    11821182        verify(validate(flm));
    1183         printYourFreddies(fly`first, fly`last, 0);     // 0.7; 0.7; 0.7; 0.7
     1183        printYourFreddies(first( fly ), last( fly ), 0);     // 0.7; 0.7; 0.7; 0.7
    11841184}
    11851185
     
    11911191        insert_last(ml, m);
    11921192
    1193         printMariatheotokos(ml`first, ml`last, 1);     // 0.7; 0.7; 0.7; 0.7
     1193        printMariatheotokos(first( ml ), last( ml ), 1);     // 0.7; 0.7; 0.7; 0.7
    11941194
    11951195        verify(validate(ml));
     
    11991199        verify(validate(ml));
    12001200
    1201         assert( & ml`first == 0p );
    1202         assert( & ml`last == 0p );
     1201        assert( &first( ml ) == 0p );
     1202        assert( &last( ml ) == 0p );
    12031203
    12041204        // observe f is solo in mine (now unlisted); in yours, it was just traversed
     
    12111211        insert_last(ml, m);
    12121212        verify(validate(ml));
    1213         printMariatheotokos(ml`first, ml`last, 0);     // 0.7; 0.7; 0.7; 0.7
     1213        printMariatheotokos(first( ml ), last( ml ), 0);     // 0.7; 0.7; 0.7; 0.7
    12141214}
    12151215
     
    12421242        insert_last(fly, f3);
    12431243
    1244         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1245         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1246 
    1247         verify(validate(fly));
    1248         verify(validate(flm));
    1249 
    1250         fred & popped = try_pop_front(flm);
    1251 
    1252         verify(validate(fly));
    1253         verify(validate(flm));
    1254 
    1255         printMyFreddies(flm`first, flm`last, 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
    1256         printYourFreddies(fly`first, fly`last, 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     1244        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1245        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1246
     1247        verify(validate(fly));
     1248        verify(validate(flm));
     1249
     1250        fred & popped = remove_first(flm);
     1251
     1252        verify(validate(fly));
     1253        verify(validate(flm));
     1254
     1255        printMyFreddies(first( flm ), last( flm ), 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
     1256        printYourFreddies(first( fly ), last( fly ), 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    12571257
    12581258        // observe f1 is now solo in mine; in yours, it was just traversed
     
    12781278        insert_last(fly, f3);
    12791279
    1280         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1281         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1282 
    1283         verify(validate(fly));
    1284         verify(validate(flm));
    1285 
    1286         fred & popped = try_pop_front(fly);
    1287 
    1288         verify(validate(fly));
    1289         verify(validate(flm));
    1290 
    1291         printMyFreddies(flm`first, flm`last, 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    1292         printYourFreddies(fly`first, fly`last, 0);   // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
     1280        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1281        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1282
     1283        verify(validate(fly));
     1284        verify(validate(flm));
     1285
     1286        fred & popped = remove_first(fly);
     1287
     1288        verify(validate(fly));
     1289        verify(validate(flm));
     1290
     1291        printMyFreddies(first( flm ), last( flm ), 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     1292        printYourFreddies(first( fly ), last( fly ), 0);   // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
    12931293
    12941294        // observe f1 is now solo in yours; in mine, it was just traversed
     
    13091309        insert_last(ml, m3);
    13101310
    1311         printMariatheotokos(ml`first, ml`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1311        printMariatheotokos(first( ml ), last( ml ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    13121312
    13131313        verify(validate(ml));
    13141314
    1315         mary & popped = try_pop_front(ml);
     1315        mary & popped = remove_first(ml);
    13161316
    13171317        verify(validate(ml));
    13181318
    1319         printMariatheotokos(ml`first, ml`last, 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
     1319        printMariatheotokos(first( ml ), last( ml ), 0);     // 2.7, 3.7;       2.7;  3.7;  3.7, 2.7      (modified)
    13201320
    13211321        // observe m1 is now solo
     
    13411341        insert_last(fly, f3);
    13421342
    1343         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1344         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1345 
    1346         verify(validate(fly));
    1347         verify(validate(flm));
    1348 
    1349         fred & popped = try_pop_back(flm);
    1350 
    1351         verify(validate(fly));
    1352         verify(validate(flm));
    1353 
    1354         printMyFreddies(flm`first, flm`last, 0);     // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
    1355         printYourFreddies(fly`first, fly`last, 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     1343        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1344        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1345
     1346        verify(validate(fly));
     1347        verify(validate(flm));
     1348
     1349        fred & popped = remove_last(flm);
     1350
     1351        verify(validate(fly));
     1352        verify(validate(flm));
     1353
     1354        printMyFreddies(first( flm ), last( flm ), 0);     // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
     1355        printYourFreddies(first( fly ), last( fly ), 0);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    13561356
    13571357        // observe f3 is now solo in mine; in yours, it was just traversed
     
    13771377        insert_last(fly, f3);
    13781378
    1379         printMyFreddies(flm`first, flm`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1380         printYourFreddies(fly`first, fly`last, 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    1381 
    1382         verify(validate(fly));
    1383         verify(validate(flm));
    1384 
    1385         fred & popped = try_pop_back(fly);
    1386 
    1387         verify(validate(fly));
    1388         verify(validate(flm));
    1389 
    1390         printMyFreddies(flm`first, flm`last, 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
    1391         printYourFreddies(fly`first, fly`last, 0);   // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
     1379        printMyFreddies(first( flm ), last( flm ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1380        printYourFreddies(first( fly ), last( fly ), 1);   // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1381
     1382        verify(validate(fly));
     1383        verify(validate(flm));
     1384
     1385        fred & popped = remove_last(fly);
     1386
     1387        verify(validate(fly));
     1388        verify(validate(flm));
     1389
     1390        printMyFreddies(first( flm ), last( flm ), 0);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7 (unmodified)
     1391        printYourFreddies(first( fly ), last( fly ), 0);   // 1.7, 2.7;       1.7;  2.7;  2.7, 1.7      (modified)
    13921392
    13931393        // observe f3 is now solo in yours; in mine, it was just traversed
     
    14081408        insert_last(ml, m3);
    14091409
    1410         printMariatheotokos(ml`first, ml`last, 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
     1410        printMariatheotokos(first( ml ), last( ml ), 1);     // 1.7, 2.7, 3.7;  1.7;  3.7;  3.7, 2.7, 1.7
    14111411
    14121412        verify(validate(ml));
    14131413
    1414         mary & popped = try_pop_back(ml);
     1414        mary & popped = remove_last(ml);
    14151415
    14161416        verify(validate(ml));
    14171417
    1418         printMariatheotokos(ml`first, ml`last, 0);     // 1.7, 1.7;       1.7;  2.7;  2.7, 1.7      (modified)
     1418        printMariatheotokos(first( ml ), last( ml ), 0);     // 1.7, 1.7;       1.7;  2.7;  2.7, 1.7      (modified)
    14191419
    14201420        // observe m1 is now solo
     
    14281428// Section 4g
    14291429//
    1430 // Test cases of `isEmpty, `hasPrev, `hasNext,
    1431 // try_pop_front, try_pop_back, modifications via `elems
     1430// Test cases of isEmpty, isFirst, isLast,
     1431// remove_first, remove_last, modifications via iter
    14321432//
    14331433// Example of call-side user code
     
    14411441        mary m3 = {3.7};
    14421442
    1443         dlist(mary) ml;                 assert( ml`isEmpty);
    1444 
    1445         insert_last(ml, m1);    assert(!ml`isEmpty);
    1446         insert_last(ml, m2);    assert(!ml`isEmpty);
    1447         insert_last(ml, m3);    assert(!ml`isEmpty);
    1448 
    1449         mary & m1prev = m1`prev;
    1450         mary & m1next = m1`next;
    1451         mary & m2prev = m2`prev;
    1452         mary & m2next = m2`next;
    1453         mary & m3prev = m3`prev;
    1454         mary & m3next = m3`next;
    1455 
    1456         assert (&m1prev == 0p);
    1457         assert (&m1next == &m2);
    1458         assert (&m2prev == &m1);
    1459         assert (&m2next == &m3);
    1460         assert (&m3prev == &m2);
    1461         assert (&m3next == 0p);
    1462 
    1463         assert(!m1`hasPrev);
    1464         assert( m1`hasNext);
    1465         assert( m2`hasPrev);
    1466         assert( m2`hasNext);
    1467         assert( m3`hasPrev);
    1468         assert(!m3`hasNext);
     1443        dlist(mary) ml;                 assert( isEmpty( ml ));
     1444
     1445        insert_last(ml, m1);    assert(!isEmpty( ml ));
     1446        insert_last(ml, m2);    assert(!isEmpty( ml ));
     1447        insert_last(ml, m3);    assert(!isEmpty( ml ));
     1448
     1449        mary & m1prev = prev( m1 );
     1450        mary & m1next = next( m1 );
     1451        mary & m2prev = prev( m2 );
     1452        mary & m2next = next( m2 );
     1453        mary & m3prev = prev( m3 );
     1454        mary & m3next = next( m3 );
     1455
     1456        assert( &m1prev == 0p );
     1457        assert( &m1next == &m2 );
     1458        assert( &m2prev == &m1 );
     1459        assert( &m2next == &m3 );
     1460        assert( &m3prev == &m2 );
     1461        assert( &m3next == 0p );
     1462
     1463        assert( ! isFirst( m1 ) );
     1464        assert( isLast( m1 ) );
     1465        assert( isFirst( m2 ) );
     1466        assert( isLast( m2 ) );
     1467        assert( isFirst( m3 ) );
     1468        assert( ! isLast( m3 ) );
    14691469
    14701470        printf("accessor_cases done\n");
     
    14861486        // queue, back to front
    14871487
    1488         assert( ml`isEmpty);
     1488        assert( isEmpty( ml ));
    14891489
    14901490        insert_last(ml, m1);
     
    14921492        insert_last(ml, m3);
    14931493
    1494         &m1r = & try_pop_front(ml);     assert(!ml`isEmpty);
    1495         &m2r = & try_pop_front(ml);     assert(!ml`isEmpty);
    1496         &m3r = & try_pop_front(ml);     assert( ml`isEmpty);
    1497         &mxr = & try_pop_front(ml);     assert( ml`isEmpty);
     1494        &m1r = & remove_first(ml);      assert(!isEmpty( ml ));
     1495        &m2r = & remove_first(ml);      assert(!isEmpty( ml ));
     1496        &m3r = & remove_first(ml);      assert( isEmpty( ml ));
     1497        &mxr = & remove_first(ml);      assert( isEmpty( ml ));
    14981498
    14991499        assert( &m1r == &m1 );
     
    15081508        // queue, front to back
    15091509
    1510         assert( ml`isEmpty);
     1510        assert( isEmpty( ml ));
    15111511
    15121512        insert_first(ml, m1);
     
    15141514        insert_first(ml, m3);
    15151515
    1516         &m1r = & try_pop_back(ml);      assert(!ml`isEmpty);
    1517         &m2r = & try_pop_back(ml);      assert(!ml`isEmpty);
    1518         &m3r = & try_pop_back(ml);      assert( ml`isEmpty);
    1519         &mxr = & try_pop_back(ml);      assert( ml`isEmpty);
     1516        &m1r = & remove_last(ml);       assert(!isEmpty( ml ));
     1517        &m2r = & remove_last(ml);       assert(!isEmpty( ml ));
     1518        &m3r = & remove_last(ml);       assert( isEmpty( ml ));
     1519        &mxr = & remove_last(ml);       assert( isEmpty( ml ));
    15201520
    15211521        assert( &m1r == &m1 );
     
    15301530        // stack at front
    15311531
    1532         assert( ml`isEmpty);
     1532        assert( isEmpty( ml ));
    15331533
    15341534        insert_first(ml, m1);
     
    15361536        insert_first(ml, m3);
    15371537
    1538         &m3r = & try_pop_front(ml);     assert(!ml`isEmpty);
    1539         &m2r = & try_pop_front(ml);     assert(!ml`isEmpty);
    1540         &m1r = & try_pop_front(ml);     assert( ml`isEmpty);
    1541         &mxr = & try_pop_front(ml);     assert( ml`isEmpty);
     1538        &m3r = & remove_first(ml);      assert(!isEmpty( ml ));
     1539        &m2r = & remove_first(ml);      assert(!isEmpty( ml ));
     1540        &m1r = & remove_first(ml);      assert( isEmpty( ml ));
     1541        &mxr = & remove_first(ml);      assert( isEmpty( ml ));
    15421542
    15431543        assert( &m1r == &m1 );
     
    15521552        // stack at back
    15531553
    1554         assert( ml`isEmpty);
     1554        assert( isEmpty( ml ));
    15551555
    15561556        insert_last(ml, m1);
     
    15581558        insert_last(ml, m3);
    15591559
    1560         &m3r = & try_pop_back(ml);      assert(!ml`isEmpty);
    1561         &m2r = & try_pop_back(ml);      assert(!ml`isEmpty);
    1562         &m1r = & try_pop_back(ml);      assert( ml`isEmpty);
    1563         &mxr = & try_pop_back(ml);      assert( ml`isEmpty);
     1560        &m3r = & remove_last(ml);       assert(!isEmpty( ml ));
     1561        &m2r = & remove_last(ml);       assert(!isEmpty( ml ));
     1562        &m1r = & remove_last(ml);       assert( isEmpty( ml ));
     1563        &mxr = & remove_last(ml);       assert( isEmpty( ml ));
    15641564
    15651565        assert( &m1r == &m1 );
     
    15801580
    15811581        dlist(mary) ml;
    1582         mary & mlorigin = ml`elems;
     1582        mary & mlorigin = iter( ml );
    15831583
    15841584        // insert before the origin
    15851585
    1586         insert_before( ml`elems, m1 );
    1587         assert( ! ml`isEmpty );
    1588 
    1589         mary & mlfirst = ml`first;
    1590         mary & mllast = ml`last;
     1586        insert_before( iter( ml ), m1 );
     1587        assert( ! isEmpty( ml ) );
     1588
     1589        mary & mlfirst = first( ml );
     1590        mary & mllast = last( ml );
    15911591
    15921592        assert( &m1 == & mlfirst );
     
    15951595        // moveNext after last goes back to origin, &vv
    15961596
    1597         bool canMoveNext = mllast`moveNext;
    1598         bool canMovePrev = mlfirst`movePrev;
     1597        bool canMoveNext = advance( mllast );
     1598        bool canMovePrev = recede( mlfirst );
    15991599
    16001600        assert( ! canMoveNext );
     
    16091609void test__isListed_cases__mary() {
    16101610
    1611         mary m1 = {1.7};        assert(! m1`isListed);
    1612         mary m2 = {2.7};        assert(! m2`isListed);
    1613         mary m3 = {3.7};        assert(! m3`isListed);
     1611        mary m1 = {1.7};        assert( ! isListed( m1 ) );
     1612        mary m2 = {2.7};        assert( ! isListed( m2 ) );
     1613        mary m3 = {3.7};        assert( ! isListed( m3 ) );
    16141614
    16151615        dlist(mary) ml;
    16161616
    1617         insert_last(ml, m1);    assert(  m1`isListed);  assert(! m2`isListed);
    1618         insert_last(ml, m2);    assert(  m2`isListed);  assert(! m3`isListed);
    1619         insert_last(ml, m3);    assert(  m3`isListed);
    1620 
    1621         remove( m1 );           assert(! m1`isListed);  assert(  m2`isListed);
    1622         remove( m2 );           assert(! m2`isListed);  assert(  m3`isListed);
    1623         remove( m3 );           assert(! m3`isListed);
     1617        insert_last(ml, m1);    assert( isListed( m1 ) );  assert(! isListed( m2 ) );
     1618        insert_last(ml, m2);    assert( isListed( m2 ) );  assert(! isListed( m3 ) );
     1619        insert_last(ml, m3);    assert( isListed( m3 ) );
     1620
     1621        remove( m1 );           assert( ! isListed( m1 ) );  assert(  isListed( m2 ) );
     1622        remove( m2 );           assert( ! isListed( m2 ) );  assert(  isListed( m3 ) );
     1623        remove( m3 );           assert( ! isListed( m3 ) );
    16241624
    16251625        printf("isListed cases done\n");
  • tests/zombies/hashtable.cfa

    rf85de47 r65bd3c2  
    7171        dlist(tN, tE) & bucket = buckets[ bucket_of(this, k) ];
    7272
    73         for ( tN * item = & $tempcv_e2n(bucket`first);  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
     73        for ( tN * item = & $tempcv_e2n(first( bucket ));  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
    7474            if ( key(*item) == k ) {
    7575                return *item;
     
    9494        dlist(tN, tE) & bucket = buckets[ bucket_of(this, k) ];
    9595
    96         for ( tN * item = & $tempcv_e2n(bucket`first);  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
     96        for ( tN * item = & $tempcv_e2n(first( bucket ));  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
    9797            if ( key(*item) == k ) {
    9898                remove(*item);
  • tests/zombies/hashtable2.cfa

    rf85de47 r65bd3c2  
    149149        dlist(request_in_ht_by_src, request) & bucket = buckets[ bucket_of(this, k) ];
    150150
    151         for ( request_in_ht_by_src * item = & $tempcv_e2n(bucket`first);  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
     151        for ( request_in_ht_by_src * item = & $tempcv_e2n(first( bucket ));  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
    152152            if ( key(*item) == k ) {
    153153                return *item;
     
    177177        dlist(request_in_ht_by_src, request) & bucket = buckets[ bucket_of(this, k) ];
    178178
    179         for ( request_in_ht_by_src * item = & $tempcv_e2n(bucket`first);  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
     179        for ( request_in_ht_by_src * item = & $tempcv_e2n(first( bucket ));  item != 0p;  item = & $tempcv_e2n((*item)`next) ) {
    180180            if ( key(*item) == k ) {
    181181                remove(*item);
     
    257257
    258258    // will re-implement as an actual splice
    259     while ( & src_to_empty`first != 0p ) {
     259    while ( & src_to_first( empty ) != 0p ) {
    260260        insert_last( snk_to_fill_at_last, pop_first( src_to_empty ) );
    261261    }
     
    319319
    320320        // fill new table with old items
    321         while ( & items`first != 0p ) {
     321        while ( &first( items ) != 0p ) {
    322322            put( this, pop_first( items ) );
    323323        }
  • tests/zombies/linked-list-perf/experiment.koad

    rf85de47 r65bd3c2  
    144144        for ( volatile unsigned int t = 0; t < Times; t += 1 ) {
    145145                Repeat( insert_last( lst, s[i] ) );
    146                 Repeat( remove( lst`first ) );
     146                Repeat( remove( first( lst ) ) );
    147147        }
    148148        end = clock();
     
    168168        for ( volatile unsigned int t = 0; t < Times; t += 1 ) {
    169169                Repeat( insert_last( lst, s[i] ) );
    170                 Repeat( remove( lst`first ) );
     170                Repeat( remove( first( lst ) ) );
    171171        }
    172172        end = clock();
  • tests/zombies/linked-list-perf/mike-old.hfa

    rf85de47 r65bd3c2  
    99// Author           : Michael Brooks
    1010// Created On       : Wed Apr 22 18:00:00 2020
    11 // Last Modified By : Michael Brooks
    12 // Last Modified On : Wed Apr 22 18:00:00 2020
    13 // Update Count     : 1
     11// Last Modified By : Peter A. Buhr
     12// Last Modified On : Mon Apr 21 17:32:37 2025
     13// Update Count     : 3
    1414//
    1515
     
    147147        }
    148148
    149         static inline Telem & ?`first( dlist(Tnode, Telem) &l ) {
     149        static inline Telem & first( dlist(Tnode, Telem) &l ) {
    150150                return * l.$links.next.elem;
    151151        }
     
    157157        #if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
    158158        static bool $validate_fwd( dlist(Tnode, Telem) & this ) {
    159                 Tnode * it = & $tempcv_e2n( this`first );
     159                Tnode * it = & $tempcv_e2n( first( this ) );
    160160                if (!it) return (& this`last == 0p);
    161161
     
    170170        static bool $validate_rev( dlist(Tnode, Telem) & this ) {
    171171                Tnode * it = & $tempcv_e2n( this`last );
    172                 if (!it) return (& this`first == 0p);
     172                if (!it) return (& first( this ) == 0p);
    173173
    174174                while( $prev_link(*it).elem ) {
     
    176176                }
    177177
    178                 return ( it == & $tempcv_e2n( this`first ) ) &&
     178                return ( it == & $tempcv_e2n( first( this ) ) ) &&
    179179                           ( $prev_link(*it).is_terminator ) &&
    180180                           ( ((dlist(Tnode, Telem)*)$prev_link(*it).terminator) == &this );
Note: See TracChangeset for help on using the changeset viewer.