Changes in / [f85de47:65bd3c2]
Files: 33 edited

Legend:
  Unmodified
+ Added
- Removed
libcfa/src/bits/collection.hfa
  // PUBLIC

- void ?{}( Colable & co ) with( co ) {
- 	next = 0p;
+ void ?{}( Colable & co ) {
+ 	co.next = 0p;
  } // post: ! listed()

  // return true iff *this is an element of a collection
- bool listed( Colable & co ) with( co ) {	// pre: this != 0
- 	return next != 0p;
+ bool listed( Colable & co ) {			// pre: this != 0
+ 	return co.next != 0p;
  }
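This hunk shows the pattern applied through most of this changeset: the with clause, which opens an aggregate's members into scope, is dropped in favour of explicit qualification. A minimal before/after sketch (the function name reset is hypothetical, not part of the library):

	// before: with( co ) opens co's members, so a bare "next" means co.next
	void reset( Colable & co ) with( co ) { next = 0p; }
	// after: the member access is spelled out explicitly
	void reset( Colable & co ) { co.next = 0p; }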
libcfa/src/bits/queue.hfa
  Queue(T) & ?=?( const Queue(T) & ) = void;	// no assignment

- void ?{}( Queue(T) & q ) with( q ) {
+ void ?{}( Queue(T) & q ) {
  	((Collection &)q){};
- 	last = 0p;
+ 	q.last = 0p;
  } // post: empty()

- T & tail( Queue(T) & q ) with( q ) {
- 	return *last;
+ T & tail( Queue(T) & q ) {
+ 	return *q.last;
  }
…
  	if ( listed( &n ) ) abort( "(Queue &)%p.addHead( %p ) : Node is already on another list.", &q, &n );
  	#endif // __CFA_DEBUG__
- 	if ( last ) {
+ 	if ( q.last ) {
  		Next( &n ) = &head( q );
  		q.root = &n;
…
  	if ( listed( &n ) ) abort( "(Queue &)%p.addTail( %p ) : Node is already on another list.", &q, &n );
  	#endif // __CFA_DEBUG__
- 	if ( last ) Next( last ) = &n;
+ 	if ( q.last ) Next( last ) = &n;
  	else root = &n;
  	last = &n;
libcfa/src/collections/list.hfa
  // Created On   : Wed Apr 22 18:00:00 2020
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Sun Apr 20 19:04:50 2025
- // Update Count     : 51
+ // Last Modified On : Thu Apr 24 18:12:59 2025
+ // Update Count     : 72
  //
…
  // The origin is the position encountered at the start of iteration, signifying, "need to advance to the first element,"
- // and at the end of iteration, signifying, "no more elements."  Normal comsumption of an iterator runs ?`moveNext as
- // the first step, and uses the return of ?`moveNext as a guard, before dereferencing the iterator.  So normal
+ // and at the end of iteration, signifying, "no more elements."  Normal comsumption of an iterator runs "advance" as
+ // the first step, and uses the return of "advance" as a guard, before dereferencing the iterator.  So normal
  // consumption of an iterator does not dereference an iterator in origin position.  The value of a pointer (underlying a
  // refence) that is exposed publicly as an iteraor, and also a pointer stored internally in a link field, is tagged, to
…
  static inline forall( tE &, tLinks & | embedded( tE, tLinks, dlink( tE ) ) ) {
+ 	bool isListed( tE & node ) {
+ 		verify( &node != 0p );
+ 		dlink( tE ) & node_links = node`inner;
+ 		return (node_links.prev != 0p) || (node_links.next != 0p);
+ 	}
+
+ 	bool isEmpty( dlist( tE, tLinks ) & list ) {
+ 		tE * firstPtr = list.next;
+ 		if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
+ 		return firstPtr == 0p;
+ 	}
+
+ 	tE & first( dlist( tE, tLinks ) & list ) {
+ 		tE * firstPtr = list.next;
+ 		if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
+ 		return *firstPtr;
+ 	}
+
+ 	tE & last( dlist( tE, tLinks ) & list ) {
+ 		tE * lastPtr = list.prev;
+ 		if ( ORIGIN_TAG_QUERY( (size_t)lastPtr ) ) lastPtr = 0p;
+ 		return *lastPtr;
+ 	}
+
  	tE & insert_before( tE & before, tE & node ) {
  		verify( &before != 0p );
…
  	}

- 	tE & ?`first( dlist( tE, tLinks ) & list ) {
- 		tE * firstPtr = list.next;
- 		if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
- 		return *firstPtr;
- 	}
-
- 	tE & ?`last( dlist( tE, tLinks ) & list ) {
- 		tE * lastPtr = list.prev;
- 		if ( ORIGIN_TAG_QUERY( (size_t)lastPtr ) ) lastPtr = 0p;
- 		return *lastPtr;
- 	}
-
- 	bool ?`isEmpty( dlist( tE, tLinks ) & list ) {
- 		tE * firstPtr = list.next;
- 		if ( ORIGIN_TAG_QUERY( (size_t)firstPtr ) ) firstPtr = 0p;
- 		return firstPtr == 0p;
- 	}
-
- 	bool ?`isListed( tE & node ) {
- 		verify( &node != 0p );
- 		dlink( tE ) & node_links = node`inner;
- 		return (node_links.prev != 0p) || (node_links.next != 0p);
- 	}
-
- 	tE & ?`elems( dlist( tE, tLinks ) & list ) {
+ 	tE & iter( dlist( tE, tLinks ) & list ) {
  		tE * origin = $get_list_origin_addr( list );
  		return *origin;
  	}
- 	tE & ?`head( dlist( tE, tLinks ) & list ) {
- 		return list`elems;
- 	}
-
- 	bool ?`moveNext( tE && refx ) {
+
+ 	bool recede( tE && refx ) {
+ 		tE && ref_inner = refx;
+ 		tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
+ 		&ref_inner = oldReferent`inner.prev;
+ 		return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
+ 	}
+
+ 	bool advance( tE && refx ) {
  		tE && ref_inner = refx;
  		tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
…
  		return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
  	}
- 	bool ?`next( tE && refx ) {			// alternate name
- 		return refx`moveNext;
- 	}
-
- 	bool ?`movePrev( tE && refx ) {
- 		tE && ref_inner = refx;
- 		tE & oldReferent = *(tE*)ORIGIN_TAG_CLEAR( (size_t)&ref_inner );
- 		&ref_inner = oldReferent`inner.prev;
- 		return &ref_inner != 0p && ! ORIGIN_TAG_QUERY( (size_t)&ref_inner );
- 	}
- 	bool ?`prev( tE && refx ) {			// alternate name
- 		return refx`movePrev;
- 	}
-
- 	bool ?`hasNext( tE & node ) {
- 		return node`moveNext;
- 	}
-
- 	bool ?`hasPrev( tE & node ) {
- 		return node`movePrev;
- 	}
-
- 	tE & ?`next( tE & node ) {
- 		if ( node`moveNext ) return node;
+
+ 	bool isFirst( tE & node ) {
+ 		return recede( node );
+ 	}
+
+ 	bool isLast( tE & node ) {
+ 		return advance( node );
+ 	}
+
+ 	tE & prev( tE & node ) {
+ 		if ( recede( node ) ) return node;
  		return *0p;
  	}

- 	tE & ?`prev( tE & node ) {
- 		if ( node`movePrev ) return node;
+ 	tE & next( tE & node ) {
+ 		if ( advance( node ) ) return node;
  		return *0p;
  	}

  	tE & insert_first( dlist( tE, tLinks ) & list, tE & node ) {
- 		insert_after( list`elems, node );
+ 		insert_after( iter( list ), node );
  		return node;
  	}

  	tE & insert_last( dlist( tE, tLinks ) & list, tE & node ) {
- 		insert_before( list`elems, node );
- 		return node;
- 	}
- 	tE & insert( dlist( tE, tLinks ) & list, tE & node ) {	// alternate name
+ 		insert_before( iter( list ), node );
+ 		return node;
+ 	}
+ 	tE & insert( dlist( tE, tLinks ) & list, tE & node ) {	// synonym for insert_last
  		insert_last( list, node );
  		return node;
…
  	tE & remove_first( dlist( tE, tLinks ) & list ) {
- 		return remove( list`first );
+ 		tE & first_node = first( list );
+ 		if ( &first_node ) return remove( first_node );
+ 		return first_node;
  	}

  	tE & remove_last( dlist( tE, tLinks ) & list ) {
- 		return remove( list`last );
+ 		tE & last_node = last( list );
+ 		if ( &last_node ) return remove( last_node );
+ 		return last_node;
  	}
…
  	// }

- 	tE & try_pop_front( dlist( tE, tLinks ) & list ) {
- 		tE & first_inlist = list`first;
- 		tE & first_item = first_inlist;
- 		if ( &first_item ) remove( first_inlist );
- 		return first_item;
- 	}
-
- 	tE & try_pop_back( dlist( tE, tLinks ) & list ) {
- 		tE & last_inlist = list`last;
- 		tE & last_item = last_inlist;
- 		if ( &last_item ) remove( last_inlist );
- 		return last_item;
- 	}
-
-
  	#if ! defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
  	bool $validate_fwd( dlist( tE, tLinks ) & this ) {
- 		if ( ! &this`first ) return &this`last == 0p;
+ 		if ( ! &first( this ) ) return &last( this ) == 0p;

  		tE & lagElem = *0p;
- 		while ( tE & it = this`elems; it`moveNext ) {
- 			if ( &lagElem == 0p && &it != &this`first ) return false;
+ 		while ( tE & it = iter( this ); advance( it ) ) {
+ 			if ( &lagElem == 0p && &it != &first( this ) ) return false;
  			&lagElem = &it;
  		}

- 		if ( &lagElem != &this`last ) return false;
-
- 		// TODO: verify that it is back at this`elems;
+ 		if ( &lagElem != &last( this ) ) return false;
+
+ 		// TODO: verify that it is back at iter( this );
  		return true;
  	}

  	bool $validate_rev( dlist( tE, tLinks ) & this ) {
- 		if ( ! &this`last ) return &this`first == 0p;
+ 		if ( ! &last( this ) ) return &first( this ) == 0p;

  		tE & lagElem = *0p;
- 		while ( tE & it = this`elems; it`movePrev ) {
- 			if ( &lagElem == 0p && &it != &this`last ) return false;
+ 		while ( tE & it = iter( this ); recede( it ) ) {
+ 			if ( &lagElem == 0p && &it != &last( this ) ) return false;
  			&lagElem = &it;
  		}

- 		if ( &lagElem != &this`first ) return false;
-
- 		// TODO: verify that it is back at this`elems;
+ 		if ( &lagElem != &first( this ) ) return false;
+
+ 		// TODO: verify that it is back at iter( this );
  		return true;
  	}
…
  // TEMPORARY, until foreach statement created.
- #define FOREACH( list, index ) for ( typeof( (list)`head ) & (index) = (list)`head; (index)`next; )
- #define FOREACH_REV( list, index ) for ( typeof( (list)`head ) & (index) = (list)`head; (index)`prev; )
- #define FOREACH_COND( list, index, expr ) for ( typeof( (list)`head ) & (index) = (list)`head; (index)`next && !(expr); )
- #define FOREACH_REV_COND( list, index, expr ) for ( typeof( (list)`head ) & (index) = (list)`head; (index)`prev && !(expr); )
+ #define FOREACH( list, index ) for ( typeof(iter( list )) & (index) = iter( list ); advance( index ); )
+ #define FOREACH_REV( list, index ) for ( typeof(iter( list )) & (index) = iter( list ); recede( index ); )
+ #define FOREACH_COND( list, index, expr ) for ( typeof(iter( list )) & (index) = iter( list ); advance( index ) && !(expr); )
+ #define FOREACH_REV_COND( list, index, expr ) for ( typeof(iter( list )) & (index) = iter( list ); recede( index ) && !(expr); )
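Taken together, the list.hfa changes retire the backtick operators (?`first, ?`moveNext, ...) in favour of plain functions (first, advance, ...). A minimal usage sketch under the new API; the node type job is hypothetical, while P9_EMBEDDED is the existing embedding macro from this header:

	struct job {
		int id;
		inline dlink( job );				// intrusive links (hypothetical node type)
	};
	P9_EMBEDDED( job, dlink( job ) )

	dlist( job ) work;
	job j1, j2;
	insert_last( work, j1 );				// append
	insert_first( work, j2 );				// prepend
	for ( job & it = iter( work ); advance( it ); ) {	// iter() yields origin; advance() guards
		// use it.id only after advance() returns true
	}
	job & f = remove_first( work );			// now safe on an empty list: returns a null reference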
libcfa/src/collections/lockfree.hfa
  };

- static inline void ?{}(mcs_queue(T) & this) { this.tail = 0p; }
- static inline bool empty(const mcs_queue(T) & this) { return !this.tail; }
-
- static inline forall(| { T * volatile & ?`next ( T * ); })
- {
+ static inline void ?{}( mcs_queue(T) & this ) { this.tail = 0p; }
+ static inline bool empty( const mcs_queue(T) & this ) { return ! this.tail; }
+
+ static inline forall( | { T * volatile & next ( T * ); }) {
  	// Adds an element to the list
  	// Multi-Thread Safe, Lock-Free
- 	T * push(mcs_queue(T) & this, T * elem) __attribute__((artificial));
- 	T * push(mcs_queue(T) & this, T * elem) {
- 		/* paranoid */ verify( !(elem`next) );
+ 	T * push( mcs_queue(T) & this, T * elem ) __attribute__((artificial));
+ 	T * push( mcs_queue(T) & this, T * elem ) {
+ 		/* paranoid */ verify( ! next( elem ) );
  		// Race to add to the tail
- 		T * prev = __atomic_exchange_n(&this.tail, elem, __ATOMIC_SEQ_CST);
+ 		T * prev_val = __atomic_exchange_n(&this.tail, elem, __ATOMIC_SEQ_CST);
  		// If we aren't the first, we need to tell the person before us
  		// No need to
- 		if ( prev ) prev`next = elem;
- 		return prev;
+ 		if ( prev_val ) next( prev_val ) = elem;
+ 		return prev_val;
  	}
…
  	// Passing an element that is not the head is undefined behavior
  	// NOT Multi-Thread Safe, concurrent pushes are safe
- 	T * advance(mcs_queue(T) & this, T * elem) __attribute__((artificial));
- 	T * advance(mcs_queue(T) & this, T * elem) {
+ 	T * advance( mcs_queue(T) & this, T * elem ) __attribute__((artificial));
+ 	T * advance( mcs_queue(T) & this, T * elem ) {
  		T * expected = elem;
  		// Check if this is already the last item
…
  		// If not wait for next item to show-up, filled by push
- 		while ( !(elem`next) ) Pause();
+ 		while ( ! next( elem ) ) Pause();

  		// we need to return if the next link was empty
- 		T * ret = elem`next;
+ 		T * ret = next( elem );

  		// invalidate link to reset to initial state
- 		elem`next = 0p;
+ 		next( elem ) = 0p;
  		return ret;
  	}
…
  };

- static inline void ?{}( mpsc_queue(T) & this) {
+ static inline void ?{}( mpsc_queue(T) & this ) {
  	((mcs_queue(T)&)this){};
  	this.head = 0p;
  }

- static inline forall(| { T * volatile & ?`next ( T * ); })
- {
+ static inline forall( | { T * volatile & next ( T * ); }) {
  	// Added a new element to the queue
  	// Multi-Thread Safe, Lock-Free
- 	T * push(mpsc_queue(T) & this, T * elem) __attribute__((artificial));
- 	T * push(mpsc_queue(T) & this, T * elem) {
- 		T * prev = push((mcs_queue(T)&)this, elem);
- 		if ( !prev ) this.head = elem;
- 		return prev;
+ 	T * push( mpsc_queue(T) & this, T * elem ) __attribute__((artificial));
+ 	T * push( mpsc_queue(T) & this, T * elem ) {
+ 		T * prev_val = push( (mcs_queue(T)&)this, elem );
+ 		if ( ! prev_val ) this.head = elem;
+ 		return prev_val;
  	}

  	// Pop an element from the queue
  	// return the element that was removed
- 	// next is set to the new head of the queue
+ 	// head is set to the new head of the queue
  	// NOT Multi-Thread Safe
- 	T * pop(mpsc_queue(T) & this, T *& next) __attribute__((artificial));
- 	T * pop(mpsc_queue(T) & this, T *& next) {
+ 	T * pop( mpsc_queue(T) & this, T *& head ) __attribute__((artificial));
+ 	T * pop( mpsc_queue(T) & this, T *& head ) {
  		T * elem = this.head;
  		// If head is empty just return
- 		if ( !elem ) return 0p;
+ 		if ( ! elem ) return 0p;

  		// If there is already someone in the list, then it's easy
- 		if ( elem`next ) {
- 			this.head = next = elem`next;
+ 		if ( next( elem ) ) {
+ 			this.head = head = next( elem );
  			// force memory sync
  			__atomic_thread_fence(__ATOMIC_SEQ_CST);

  			// invalidate link to reset to initial state
- 			elem`next = 0p;
+ 			next( elem ) = 0p;
  		}
  		// Otherwise, there might be a race where it only looks but someone is enqueuing
…
  			// after that point, it could overwrite the write in push
  			this.head = 0p;
- 			next = advance((mcs_queue(T)&)this, elem);
+ 			head = advance( (mcs_queue(T)&)this, elem );

  			// Only write to the head if there is a next element
  			// it is the only way we can guarantee we are not overwriting
  			// a write made in push
- 			if (next) this.head = next;
- 		}
-
+ 			if ( head ) this.head = head;
+ 		}
  		// return removed element
  		return elem;
  	}

  	// Same as previous function
- 	T * pop( mpsc_queue(T) & this) {
+ 	T * pop( mpsc_queue(T) & this ) {
  		T * _ = 0p;
  		return pop(this, _);
…
  static inline bool is_poisoned( const poison_list(T) & this ) { return 1p == this.head; }

- static inline forall( | { T * volatile & ?`next( T * ); })
+ static inline forall( | { T * volatile & next( T * ); })
  {
  	// Adds an element to the list
  	// Multi-Thread Safe, Lock-Free
- 	bool push( poison_list(T) & this, T * elem) __attribute__((artificial));
- 	bool push( poison_list(T) & this, T * elem) {
- 		/* paranoid */ verify( 0p == (elem`next) );
- 		__atomic_store_n( &elem`next, (T*)1p, __ATOMIC_RELAXED );
+ 	bool push( poison_list(T) & this, T * elem ) __attribute__((artificial));
+ 	bool push( poison_list(T) & this, T * elem ) {
+ 		/* paranoid */ verify( 0p == next( elem ) );
+ 		__atomic_store_n( &next( elem ), (T *)1p, __ATOMIC_RELAXED );

  		// read the head up-front
…
  		// We should never succeed the CAS if it's poisonned and the elem should be 1p.
- 		/* paranoid */ verify( expected != 1p );
- 		/* paranoid */ verify( elem`next == 1p );
+ 		/* paranoid */ verify( expected != 1p );
+ 		/* paranoid */ verify( next( elem ) == 1p );

  		// If we aren't the first, we need to tell the person before us
  		// No need to
- 		elem`next = expected;
+ 		next( elem ) = expected;
  		return true;
  	}
…
  	// Passing an element that is not the head is undefined behavior
  	// NOT Multi-Thread Safe, concurrent pushes are safe
- 	T * advance(T * elem) __attribute__((artificial));
- 	T * advance(T * elem) {
+ 	T * advance( T * elem ) __attribute__((artificial));
+ 	T * advance( T * elem ) {
  		T * ret;

  		// Wait for next item to show-up, filled by push
- 		while (1p == (ret = __atomic_load_n(&elem`next, __ATOMIC_RELAXED))) Pause();
+ 		while (1p == (ret = __atomic_load_n( &next( elem ), __ATOMIC_RELAXED ) ) ) Pause();

  		return ret;
…
  	// Poison the queue, preveting new pushes and returning the head
- 	T * poison( poison_list(T) & this) __attribute__((artificial));
- 	T * poison( poison_list(T) & this) {
+ 	T * poison( poison_list(T) & this ) __attribute__((artificial));
+ 	T * poison( poison_list(T) & this ) {
  		T * ret = __atomic_exchange_n( &this.head, (T*)1p, __ATOMIC_SEQ_CST );
  		/* paranoid */ verifyf( ret != (T*)1p, "Poison list %p poisoned more than once!", &this );
…
  }; // Link

- forall( T /*| sized(T)*/ | { Link(T) * ?`next( T * ); } ) {
+ forall( T /*| sized(T)*/ | { Link(T) * next( T * ); } ) {
  	struct StackLF {
  		Link(T) stack;
…
  	void push( StackLF(T) & this, T & n ) with(this) {
- 		*( &n )`next = stack;			// atomic assignment unnecessary, or use CAA
+ 		*next( &n ) = stack;			// atomic assignment unnecessary, or use CAA
  		for () {						// busy wait
- 			if ( __atomic_compare_exchange_n( &stack.atom, &( &n )`next->atom, (Link(T))@{ (LinkData(T))@{ &n, ( &n )`next->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node
+ 			if ( __atomic_compare_exchange_n( &stack.atom, &next( &n )->atom, (Link(T))@{ (LinkData(T))@{ &n, next( &n )->data.count + 1} }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) break; // attempt to update top node
  		} // for
  	} // push
…
  		for () {						// busy wait
  			if ( t.data.top == 0p ) return 0p;	// empty stack ?
- 			Link(T) * next = ( t.data.top )`next;
+ 			Link(T) * next = next( t.data.top );
  			if ( __atomic_compare_exchange_n( &stack.atom, &t.atom, (Link(T))@{ (LinkData(T))@{ next->data.top, t.data.count } }.atom, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return t.data.top; // attempt to update top node
  		} // for
…
  			// TODO: Avoiding some problems with double fields access.
  			LinkData(T) * data = &link->data;
- 			T * next = (T *)&(*data).top;
- 			if ( next == node ) {
- 				data->top = ( node )`next->data.top;
+ 			T * ntop = (T *)&(*data).top;
+ 			if ( ntop == node ) {
+ 				data->top = next( node )->data.top;
  				return true;
  			}
- 			if ( next == 0p ) return false;
- 			link = ( next )`next;
+ 			if ( ntop == 0p ) return false;
+ 			link = next( ntop );
  		}
  	}
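The lock-free containers now state their trait assertion as a plain function, { T * volatile & next( T * ); }, instead of the ?`next operator. A client type satisfies it along these lines (the node type my_node and its field are hypothetical):

	struct my_node {
		int value;
		my_node * volatile link;
	};
	// satisfies the trait: T * volatile & next( T * )
	static inline my_node * volatile & next( my_node * n ) { return n->link; }

	mpsc_queue( my_node ) q;
	my_node n;
	n.link = 0p;					// push verifies the link starts null
	push( q, &n );					// lock-free multi-producer enqueue
	my_node * new_head;
	my_node * e = pop( q, new_head );	// single-consumer dequeue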
libcfa/src/collections/vector2.hfa
  // Created On   : Thu Jun 23 22:00:00 2021
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Tue Mar 14 08:40:53 2023
- // Update Count     : 2
+ // Last Modified On : Wed Apr 23 14:39:51 2025
+ // Update Count     : 6
  //
…
  	}

- 	while ( vector_permit(T) & liveIter = this.live_iters_$`elems; liveIter`moveNext ) {
+ 	while ( vector_permit(T) & liveIter = iter( this.live_iters_$ ); advance( liveIter ) ) {
  		liveIter.item_$ += (newItems - this.buffer_first_$);
  	}
…
  	*insertTarget = val;

- 	while ( vector_permit(T) & liveIter = col.live_iters_$`elems; liveIter`moveNext ) {
+ 	while ( vector_permit(T) & liveIter = iter( col.live_iters_$ ); advance( liveIter ) ) {
  		if ( inRange_$(liveIter.item_$, insertTarget, col.elems_end_$) ) {
  			liveIter.item_$ += 1;
libcfa/src/concurrency/alarm.cfa
  // Created On   : Fri Jun 2 11:31:25 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Wed Jun 17 16:11:35 2020
- // Update Count     : 75
+ // Last Modified On : Thu Apr 24 22:22:25 2025
+ // Update Count     : 88
  //
…
  void insert( alarm_list_t * this, alarm_node_t * n ) {
- 	alarm_node_t * it = &(*this)`first;
- 	while( it && (n->deadline > it->deadline) ) {
- 		it = &(*it)`next;
- 	}
- 	if ( it ) {
- 		insert_before( *it, *n );
- 	} else {
- 		insert_last(*this, *n);
- 	}
-
+ 	alarm_node_t & it = iter( *this );
+ 	while ( advance( it ) && it.deadline <= n->deadline );
+ 	insert_before( it, *n );
  	verify( validate( *this ) );
  }

  alarm_node_t * pop( alarm_list_t * this ) {
  	verify( validate( *this ) );
- 	alarm_node_t * head = &(*this)`first;
+ 	alarm_node_t * head = &first( *this );
  	if( head ) {
  		remove(*head);
…
  	park();

- 	/* paranoid */ verify( !node.set );
- 	/* paranoid */ verify( &node`next == 0p );
- 	/* paranoid */ verify( &node`prev == 0p );
+ 	/* paranoid */ verify( ! node.set );
+ 	/* paranoid */ verify( &next( node ) == 0p );
+ 	/* paranoid */ verify( &prev( node ) == 0p );
  }
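The rewritten insert leans on a property of the new iterator API: when advance() runs past the last element it returns false and leaves the iterator at the list origin, and insert_before at the origin appends at the tail, so one loop covers both the middle-of-list and end-of-list cases. The same idiom in generic, hypothetical form (a timer type with a deadline field is made up):

	// sorted insert over any dlist, mirroring the new alarm code
	void sorted_insert( dlist( timer ) & l, timer & n ) {
		timer & it = iter( l );							// start at origin
		while ( advance( it ) && it.deadline <= n.deadline );	// stop at first later deadline
		insert_before( it, n );							// at origin => append at tail
	}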
libcfa/src/concurrency/barrier.hfa
  // Created On   : Sun Nov 10 08:07:35 2024
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Wed Nov 13 12:37:04 2024
- // Update Count     : 9
+ // Last Modified On : Thu Apr 24 22:41:11 2025
+ // Update Count     : 12
  //
…
  // Returns a value indicating the reverse order the threads arrived, i.e. last thread returns 0 (and does not block)
- // last is an optional hook that is called by the Gth thread before unblocking the other threads.
- static inline unsigned int block( barrier & mutex b, fptr_t last = (fptr_t)0 ) with( b ) {
+ // hook is an optional hook that is called by the Gth thread before unblocking the other threads.
+ static inline unsigned int block( barrier & mutex b, fptr_t hook = (fptr_t)0 ) with( b ) {
  	arrivals -= 1;						// prefix decrement so last is 0 not 1
  	unsigned arrived = b.arrivals;		// note arrival order
…
  		wait( b.c );
  	} else {							// group formed
- 		if ( last ) last();				// safe to call
+ 		if ( hook ) hook();				// safe to call
  		signal_all( c );				// unblock group
  		arrivals = group;				// reset
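Renaming the parameter from last to hook also removes the clash with the comment's use of "last thread". A hedged usage sketch (the phase_done routine and the group size of 4 are made up, and the barrier constructor arguments are assumed):

	void phase_done() { /* runs once, in the last arriving thread */ }

	barrier b{ 4 };						// barrier for a group of 4 threads
	// in each of the 4 threads:
	unsigned order = block( b, phase_done );	// last arriver gets 0 and calls phase_done() before release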
libcfa/src/concurrency/channel.hfa
rf85de47 r65bd3c2 57 57 58 58 forall( T ) { 59 60 struct __attribute__((aligned(128))) channel { 61 size_t size, front, back, count; 62 T * buffer; 63 dlist( select_node ) prods, cons; // lists of blocked threads 64 go_mutex mutex_lock; // MX lock 65 bool closed; // indicates channel close/open 66 #ifdef CHAN_STATS 67 size_t p_blocks, p_ops, c_blocks, c_ops; // counts total ops and ops resulting in a blocked thd 68 #endif 69 }; 70 static inline void ?{}( channel(T) & this, channel(T) this2 ) = void; 71 static inline void ?=?( channel(T) & this, channel(T) this2 ) = void; 72 73 static inline void ?{}( channel(T) &c, size_t _size ) with(c) { 74 size = _size; 75 front = back = count = 0; 76 if ( size != 0 ) buffer = aalloc( size ); 77 prods{}; 78 cons{}; 79 mutex_lock{}; 80 closed = false; 81 #ifdef CHAN_STATS 82 p_blocks = 0; 83 p_ops = 0; 84 c_blocks = 0; 85 c_ops = 0; 86 #endif 87 } 88 89 static inline void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; } 90 static inline void ^?{}( channel(T) &c ) with(c) { 91 #ifdef CHAN_STATS 92 printf("Channel %p Blocks: %lu,\t\tOperations: %lu,\t%.2f%% of ops blocked\n", &c, p_blocks + c_blocks, p_ops + c_ops, ((double)p_blocks + c_blocks)/(p_ops + c_ops) * 100); 93 printf("Channel %p Consumer Blocks: %lu,\tConsumer Ops: %lu,\t%.2f%% of Consumer ops blocked\n", &c, p_blocks, p_ops, ((double)p_blocks)/p_ops * 100); 94 printf("Channel %p Producer Blocks: %lu,\tProducer Ops: %lu,\t%.2f%% of Producer ops blocked\n", &c, c_blocks, c_ops, ((double)c_blocks)/c_ops * 100); 95 #endif 96 verifyf( __handle_waituntil_OR( cons ) || __handle_waituntil_OR( prods ) || cons`isEmpty && prods`isEmpty, 97 "Attempted to delete channel with waiting threads (Deadlock).\n" ); 98 if ( size != 0 ) delete( buffer ); 99 } 100 static inline size_t get_count( channel(T) & chan ) with(chan) { return __atomic_load_n( &count, __ATOMIC_RELAXED ); } 101 static inline size_t get_size( channel(T) & chan ) with(chan) { return __atomic_load_n( &size, __ATOMIC_RELAXED ); } 102 static inline bool has_waiters( channel(T) & chan ) with(chan) { return !cons`isEmpty || !prods`isEmpty; } 103 static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !cons`isEmpty; } 104 static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !prods`isEmpty; } 105 106 // closes the channel and notifies all blocked threads 107 static inline void close( channel(T) & chan ) with(chan) { 108 lock( mutex_lock ); 109 closed = true; 110 111 // flush waiting consumers and producers 112 while ( has_waiting_consumers( chan ) ) { 113 if( !__handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race 114 break; // if __handle_waituntil_OR returns false cons is empty so break 115 cons`first.extra = 0p; 116 wake_one( cons ); 117 } 118 while ( has_waiting_producers( chan ) ) { 119 if( !__handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race 120 break; // if __handle_waituntil_OR returns false prods is empty so break 121 prods`first.extra = 0p; 122 wake_one( prods ); 123 } 124 unlock(mutex_lock); 125 } 126 127 static inline void is_closed( channel(T) & chan ) with(chan) { return closed; } 128 129 // used to hand an element to a blocked consumer and signal it 130 static inline void __cons_handoff( channel(T) & chan, T & elem ) with(chan) { 131 memcpy( cons`first.extra, (void *)&elem, sizeof(T) ); // do waiting consumer work 132 wake_one( cons ); 133 } 134 135 // used to hand an element to a 
blocked producer and signal it 136 static inline void __prods_handoff( channel(T) & chan, T & retval ) with(chan) { 137 memcpy( (void *)&retval, prods`first.extra, sizeof(T) ); 138 wake_one( prods ); 139 } 140 141 static inline void flush( channel(T) & chan, T elem ) with(chan) { 142 lock( mutex_lock ); 143 while ( count == 0 && !cons`isEmpty ) { 144 __cons_handoff( chan, elem ); 145 } 146 unlock( mutex_lock ); 147 } 148 149 // handles buffer insert 150 static inline void __buf_insert( channel(T) & chan, T & elem ) with(chan) { 151 memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) ); 152 count += 1; 153 back++; 154 if ( back == size ) back = 0; 155 } 156 157 // needed to avoid an extra copy in closed case 158 static inline bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) { 159 lock( mutex_lock ); 160 #ifdef CHAN_STATS 161 p_ops++; 162 #endif 163 164 ConsEmpty: if ( !cons`isEmpty ) { 165 if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty; 166 __cons_handoff( chan, elem ); 167 unlock( mutex_lock ); 168 return true; 169 } 170 171 if ( count == size ) { unlock( mutex_lock ); return false; } 172 173 __buf_insert( chan, elem ); 174 unlock( mutex_lock ); 175 return true; 176 } 177 178 // attempts a nonblocking insert 179 // returns true if insert was successful, false otherwise 180 static inline bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); } 181 182 // handles closed case of insert routine 183 static inline void __closed_insert( channel(T) & chan, T & elem ) with(chan) { 184 channel_closed except{ &channel_closed_vt, &elem, &chan }; 185 throwResume except; // throw closed resumption 186 if ( !__internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination 187 } 188 189 static inline void insert( channel(T) & chan, T elem ) with(chan) { 190 // check for close before acquire mx 191 if ( unlikely(closed) ) { 192 __closed_insert( chan, elem ); 193 return; 194 } 195 196 lock( mutex_lock ); 197 198 #ifdef CHAN_STATS 199 if ( !closed ) p_ops++; 200 #endif 201 202 // if closed handle 203 if ( unlikely(closed) ) { 204 unlock( mutex_lock ); 205 __closed_insert( chan, elem ); 206 return; 207 } 208 209 // buffer count must be zero if cons are blocked (also handles zero-size case) 210 ConsEmpty: if ( !cons`isEmpty ) { 211 if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty; 212 __cons_handoff( chan, elem ); 213 unlock( mutex_lock ); 214 return; 215 } 216 217 // wait if buffer is full, work will be completed by someone else 218 if ( count == size ) { 219 #ifdef CHAN_STATS 220 p_blocks++; 221 #endif 222 223 // check for if woken due to close 224 if ( unlikely( block( prods, &elem, mutex_lock ) ) ) 225 __closed_insert( chan, elem ); 226 return; 227 } // if 228 229 __buf_insert( chan, elem ); 230 unlock( mutex_lock ); 231 } 232 233 // does the buffer remove and potentially does waiting producer work 234 static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) { 235 memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) ); 236 count -= 1; 237 front = (front + 1) % size; 238 if (count == size - 1 && !prods`isEmpty ) { 239 if ( !__handle_waituntil_OR( prods ) ) return; 240 __buf_insert( chan, *(T *)prods`first.extra ); // do waiting producer work 241 wake_one( prods ); 242 } 243 } 244 245 // needed to avoid an extra copy in closed case and single return val case 246 static inline bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) { 247 lock( mutex_lock 
); 248 #ifdef CHAN_STATS 249 c_ops++; 250 #endif 251 252 ZeroSize: if ( size == 0 && !prods`isEmpty ) { 253 if ( !__handle_waituntil_OR( prods ) ) break ZeroSize; 254 __prods_handoff( chan, retval ); 255 unlock( mutex_lock ); 256 return true; 257 } 258 259 if ( count == 0 ) { unlock( mutex_lock ); return false; } 260 261 __do_remove( chan, retval ); 262 unlock( mutex_lock ); 263 return true; 264 } 265 266 // attempts a nonblocking remove 267 // returns [T, true] if insert was successful 268 // returns [T, false] if insert was successful (T uninit) 269 static inline [T, bool] try_remove( channel(T) & chan ) { 270 T retval; 271 bool success = __internal_try_remove( chan, retval ); 272 return [ retval, success ]; 273 } 274 275 static inline T try_remove( channel(T) & chan ) { 276 T retval; 277 __internal_try_remove( chan, retval ); 278 return retval; 279 } 280 281 // handles closed case of insert routine 282 static inline void __closed_remove( channel(T) & chan, T & retval ) with(chan) { 283 channel_closed except{ &channel_closed_vt, 0p, &chan }; 284 throwResume except; // throw resumption 285 if ( !__internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination 286 } 287 288 static inline T remove( channel(T) & chan ) with(chan) { 289 T retval; 290 if ( unlikely(closed) ) { 291 __closed_remove( chan, retval ); 292 return retval; 293 } 294 lock( mutex_lock ); 295 296 #ifdef CHAN_STATS 297 if ( !closed ) c_ops++; 298 #endif 299 300 if ( unlikely(closed) ) { 301 unlock( mutex_lock ); 302 __closed_remove( chan, retval ); 303 return retval; 304 } 305 306 // have to check for the zero size channel case 307 ZeroSize: if ( size == 0 && !prods`isEmpty ) { 308 if ( !__handle_waituntil_OR( prods ) ) break ZeroSize; 309 __prods_handoff( chan, retval ); 310 unlock( mutex_lock ); 311 return retval; 312 } 313 314 // wait if buffer is empty, work will be completed by someone else 315 if ( count == 0 ) { 316 #ifdef CHAN_STATS 317 c_blocks++; 318 #endif 319 // check for if woken due to close 320 if ( unlikely( block( cons, &retval, mutex_lock ) ) ) 321 __closed_remove( chan, retval ); 322 return retval; 323 } 324 325 // Remove from buffer 326 __do_remove( chan, retval ); 327 unlock( mutex_lock ); 328 return retval; 329 } 330 static inline void remove( channel(T) & chan ) { T elem = (T)remove( chan ); } 331 332 333 /////////////////////////////////////////////////////////////////////////////////////////// 334 // The following is Go-style operator support for channels 335 /////////////////////////////////////////////////////////////////////////////////////////// 336 337 static inline void ?<<?( channel(T) & chan, T elem ) { insert( chan, elem ); } 338 static inline void ?<<?( T & ret, channel(T) & chan ) { ret = remove( chan ); } 339 340 /////////////////////////////////////////////////////////////////////////////////////////// 341 // The following is support for waituntil (select) statements 342 /////////////////////////////////////////////////////////////////////////////////////////// 343 static inline bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) { 344 if ( !node`isListed && !node.park_counter ) return false; // handle special OR case 345 lock( mutex_lock ); 346 if ( node`isListed ) { // op wasn't performed 347 remove( node ); 348 unlock( mutex_lock ); 349 return false; 350 } 351 unlock( mutex_lock ); 352 353 // only return true when not special OR case and status is SAT 354 return !node.park_counter ? 
false : *node.clause_status == __SELECT_SAT; 355 } 356 357 // special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case 358 static inline bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) { 359 while ( !queue`isEmpty ) { 360 // if node not a special OR case or if we win the special OR case race break 361 if ( !queue`first.clause_status || queue`first.park_counter || __pending_set_other( queue`first, mine, ((unsigned long int)(&(queue`first))) ) ) 362 return true; 59 struct __attribute__((aligned(128))) channel { 60 size_t size, front, back, count; 61 T * buffer; 62 dlist( select_node ) prods, cons; // lists of blocked threads 63 go_mutex mutex_lock; // MX lock 64 bool closed; // indicates channel close/open 65 #ifdef CHAN_STATS 66 size_t p_blocks, p_ops, c_blocks, c_ops; // counts total ops and ops resulting in a blocked thd 67 #endif 68 }; 69 70 // type used by select statement to capture a chan read as the selected operation 71 struct chan_read { 72 T * ret; 73 channel(T) * chan; 74 }; 75 __CFA_SELECT_GET_TYPE( chan_read(T) ); 76 77 // type used by select statement to capture a chan read as the selected operation that doesn't have a param to read to 78 struct chan_read_no_ret { 79 T retval; 80 chan_read( T ) c_read; 81 }; 82 __CFA_SELECT_GET_TYPE( chan_read_no_ret(T) ); 83 84 // type used by select statement to capture a chan write as the selected operation 85 struct chan_write { 86 T elem; 87 channel(T) * chan; 88 }; 89 __CFA_SELECT_GET_TYPE( chan_write(T) ); 90 } // distribution 91 92 static inline forall( T ) { 93 void ?{}( channel(T) & this, channel(T) this2 ) = void; 94 void ?=?( channel(T) & this, channel(T) this2 ) = void; 95 96 void ?{}( channel(T) &c, size_t _size ) with(c) { 97 size = _size; 98 front = back = count = 0; 99 if ( size != 0 ) buffer = aalloc( size ); 100 prods{}; 101 cons{}; 102 mutex_lock{}; 103 closed = false; 104 #ifdef CHAN_STATS 105 p_blocks = 0; 106 p_ops = 0; 107 c_blocks = 0; 108 c_ops = 0; 109 #endif 110 } 111 112 void ?{}( channel(T) &c ){ ((channel(T) &)c){ 0 }; } 113 void ^?{}( channel(T) &c ) with(c) { 114 #ifdef CHAN_STATS 115 printf("Channel %p Blocks: %lu,\t\tOperations: %lu,\t%.2f%% of ops blocked\n", &c, p_blocks + c_blocks, p_ops + c_ops, ((double)p_blocks + c_blocks)/(p_ops + c_ops) * 100); 116 printf("Channel %p Consumer Blocks: %lu,\tConsumer Ops: %lu,\t%.2f%% of Consumer ops blocked\n", &c, p_blocks, p_ops, ((double)p_blocks)/p_ops * 100); 117 printf("Channel %p Producer Blocks: %lu,\tProducer Ops: %lu,\t%.2f%% of Producer ops blocked\n", &c, c_blocks, c_ops, ((double)c_blocks)/c_ops * 100); 118 #endif 119 verifyf( __handle_waituntil_OR( cons ) || __handle_waituntil_OR( prods ) || isEmpty( cons ) && isEmpty( prods ), 120 "Attempted to delete channel with waiting threads (Deadlock).\n" ); 121 if ( size != 0 ) delete( buffer ); 122 } 123 size_t get_count( channel(T) & chan ) with(chan) { return __atomic_load_n( &count, __ATOMIC_RELAXED ); } 124 size_t get_size( channel(T) & chan ) with(chan) { return __atomic_load_n( &size, __ATOMIC_RELAXED ); } 125 bool has_waiters( channel(T) & chan ) with(chan) { return ! isEmpty( cons ) || ! isEmpty( prods ); } 126 bool has_waiting_consumers( channel(T) & chan ) with(chan) { return ! isEmpty( cons ); } 127 bool has_waiting_producers( channel(T) & chan ) with(chan) { return ! 
isEmpty( prods ); } 128 129 // closes the channel and notifies all blocked threads 130 void close( channel(T) & chan ) with(chan) { 131 lock( mutex_lock ); 132 closed = true; 133 134 // flush waiting consumers and producers 135 while ( has_waiting_consumers( chan ) ) { 136 if( ! __handle_waituntil_OR( cons ) ) // ensure we only signal special OR case threads when they win the race 137 break; // if __handle_waituntil_OR returns false cons is empty so break 138 first( cons ).extra = 0p; 139 wake_one( cons ); 140 } 141 while ( has_waiting_producers( chan ) ) { 142 if( ! __handle_waituntil_OR( prods ) ) // ensure we only signal special OR case threads when they win the race 143 break; // if __handle_waituntil_OR returns false prods is empty so break 144 first( prods ).extra = 0p; 145 wake_one( prods ); 146 } 147 unlock(mutex_lock); 148 } 149 150 void is_closed( channel(T) & chan ) with(chan) { return closed; } 151 152 // used to hand an element to a blocked consumer and signal it 153 void __cons_handoff( channel(T) & chan, T & elem ) with(chan) { 154 memcpy( first( cons ).extra, (void *)&elem, sizeof(T) ); // do waiting consumer work 155 wake_one( cons ); 156 } 157 158 // used to hand an element to a blocked producer and signal it 159 void __prods_handoff( channel(T) & chan, T & retval ) with(chan) { 160 memcpy( (void *)&retval, first( prods ).extra, sizeof(T) ); 161 wake_one( prods ); 162 } 163 164 void flush( channel(T) & chan, T elem ) with(chan) { 165 lock( mutex_lock ); 166 while ( count == 0 && ! isEmpty( cons ) ) { 167 __cons_handoff( chan, elem ); 168 } 169 unlock( mutex_lock ); 170 } 171 172 // handles buffer insert 173 void __buf_insert( channel(T) & chan, T & elem ) with(chan) { 174 memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) ); 175 count += 1; 176 back++; 177 if ( back == size ) back = 0; 178 } 179 180 // needed to avoid an extra copy in closed case 181 bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) { 182 lock( mutex_lock ); 183 #ifdef CHAN_STATS 184 p_ops++; 185 #endif 186 187 ConsEmpty: 188 if ( ! isEmpty( cons ) ) { 189 if ( ! __handle_waituntil_OR( cons ) ) break ConsEmpty; 190 __cons_handoff( chan, elem ); 191 unlock( mutex_lock ); 192 return true; 193 } 194 195 if ( count == size ) { unlock( mutex_lock ); return false; } 196 197 __buf_insert( chan, elem ); 198 unlock( mutex_lock ); 199 return true; 200 } 201 202 // attempts a nonblocking insert 203 // returns true if insert was successful, false otherwise 204 bool try_insert( channel(T) & chan, T elem ) { return __internal_try_insert( chan, elem ); } 205 206 // handles closed case of insert routine 207 void __closed_insert( channel(T) & chan, T & elem ) with(chan) { 208 channel_closed except{ &channel_closed_vt, &elem, &chan }; 209 throwResume except; // throw closed resumption 210 if ( ! __internal_try_insert( chan, elem ) ) throw except; // if try to insert fails (would block), throw termination 211 } 212 213 void insert( channel(T) & chan, T elem ) with(chan) { 214 // check for close before acquire mx 215 if ( unlikely(closed) ) { 216 __closed_insert( chan, elem ); 217 return; 218 } 219 220 lock( mutex_lock ); 221 222 #ifdef CHAN_STATS 223 if ( ! closed ) p_ops++; 224 #endif 225 226 // if closed handle 227 if ( unlikely(closed) ) { 228 unlock( mutex_lock ); 229 __closed_insert( chan, elem ); 230 return; 231 } 232 233 // buffer count must be zero if cons are blocked (also handles zero-size case) 234 ConsEmpty: 235 if ( ! isEmpty( cons ) ) { 236 if ( ! 
__handle_waituntil_OR( cons ) ) break ConsEmpty; 237 __cons_handoff( chan, elem ); 238 unlock( mutex_lock ); 239 return; 240 } 241 242 // wait if buffer is full, work will be completed by someone else 243 if ( count == size ) { 244 #ifdef CHAN_STATS 245 p_blocks++; 246 #endif 247 248 // check for if woken due to close 249 if ( unlikely( block( prods, &elem, mutex_lock ) ) ) 250 __closed_insert( chan, elem ); 251 return; 252 } // if 253 254 __buf_insert( chan, elem ); 255 unlock( mutex_lock ); 256 } 257 258 // does the buffer remove and potentially does waiting producer work 259 void __do_remove( channel(T) & chan, T & retval ) with(chan) { 260 memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) ); 261 count -= 1; 262 front = (front + 1) % size; 263 if (count == size - 1 && ! isEmpty( prods ) ) { 264 if ( ! __handle_waituntil_OR( prods ) ) return; 265 __buf_insert( chan, *(T *)first( prods ).extra ); // do waiting producer work 266 wake_one( prods ); 267 } 268 } 269 270 // needed to avoid an extra copy in closed case and single return val case 271 bool __internal_try_remove( channel(T) & chan, T & retval ) with(chan) { 272 lock( mutex_lock ); 273 #ifdef CHAN_STATS 274 c_ops++; 275 #endif 276 277 ZeroSize: 278 if ( size == 0 && ! isEmpty( prods ) ) { 279 if ( ! __handle_waituntil_OR( prods ) ) break ZeroSize; 280 __prods_handoff( chan, retval ); 281 unlock( mutex_lock ); 282 return true; 283 } 284 285 if ( count == 0 ) { unlock( mutex_lock ); return false; } 286 287 __do_remove( chan, retval ); 288 unlock( mutex_lock ); 289 return true; 290 } 291 292 // attempts a nonblocking remove 293 // returns [T, true] if insert was successful 294 // returns [T, false] if insert was successful (T uninit) 295 [T, bool] try_remove( channel(T) & chan ) { 296 T retval; 297 bool success = __internal_try_remove( chan, retval ); 298 return [ retval, success ]; 299 } 300 301 T try_remove( channel(T) & chan ) { 302 T retval; 303 __internal_try_remove( chan, retval ); 304 return retval; 305 } 306 307 // handles closed case of insert routine 308 void __closed_remove( channel(T) & chan, T & retval ) with(chan) { 309 channel_closed except{ &channel_closed_vt, 0p, &chan }; 310 throwResume except; // throw resumption 311 if ( ! __internal_try_remove( chan, retval ) ) throw except; // if try to remove fails (would block), throw termination 312 } 313 314 T remove( channel(T) & chan ) with(chan) { 315 T retval; 316 if ( unlikely(closed) ) { 317 __closed_remove( chan, retval ); 318 return retval; 319 } 320 lock( mutex_lock ); 321 322 #ifdef CHAN_STATS 323 if ( ! closed ) c_ops++; 324 #endif 325 326 if ( unlikely(closed) ) { 327 unlock( mutex_lock ); 328 __closed_remove( chan, retval ); 329 return retval; 330 } 331 332 // have to check for the zero size channel case 333 ZeroSize: 334 if ( size == 0 && ! isEmpty( prods ) ) { 335 if ( ! 
__handle_waituntil_OR( prods ) ) break ZeroSize; 336 __prods_handoff( chan, retval ); 337 unlock( mutex_lock ); 338 return retval; 339 } 340 341 // wait if buffer is empty, work will be completed by someone else 342 if ( count == 0 ) { 343 #ifdef CHAN_STATS 344 c_blocks++; 345 #endif 346 // check for if woken due to close 347 if ( unlikely( block( cons, &retval, mutex_lock ) ) ) 348 __closed_remove( chan, retval ); 349 return retval; 350 } 351 352 // Remove from buffer 353 __do_remove( chan, retval ); 354 unlock( mutex_lock ); 355 return retval; 356 } 357 void remove( channel(T) & chan ) { T elem = (T)remove( chan ); } 358 359 360 /////////////////////////////////////////////////////////////////////////////////////////// 361 // The following is Go-style operator support for channels 362 /////////////////////////////////////////////////////////////////////////////////////////// 363 364 void ?<<?( channel(T) & chan, T elem ) { insert( chan, elem ); } 365 void ?<<?( T & ret, channel(T) & chan ) { ret = remove( chan ); } 366 367 /////////////////////////////////////////////////////////////////////////////////////////// 368 // The following is support for waituntil (select) statements 369 /////////////////////////////////////////////////////////////////////////////////////////// 370 bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) { 371 if ( ! isListed( node ) && ! node.park_counter ) return false; // handle special OR case 372 lock( mutex_lock ); 373 if ( isListed( node ) ) { // op wasn't performed 374 remove( node ); 375 unlock( mutex_lock ); 376 return false; 377 } 378 unlock( mutex_lock ); 379 380 // only return true when not special OR case and status is SAT 381 return ! node.park_counter ? false : *node.clause_status == __SELECT_SAT; 382 } 383 384 // special case of __handle_waituntil_OR, that does some work to avoid starvation/deadlock case 385 bool __handle_pending( dlist( select_node ) & queue, select_node & mine ) { 386 while ( ! isEmpty( queue ) ) { 387 // if node not a special OR case or if we win the special OR case race break 388 if ( ! 
first( queue ).clause_status || first( queue ).park_counter || __pending_set_other( first( queue ), mine, ((unsigned long int)(&(first( queue )))) ) ) 389 return true; 363 390 364 // our node lost the race when toggling in __pending_set_other 365 if ( *mine.clause_status != __SELECT_PENDING ) 366 return false; 367 368 // otherwise we lost the special OR race so discard node 369 try_pop_front( queue ); 370 } 371 return false; 372 } 373 374 // type used by select statement to capture a chan read as the selected operation 375 struct chan_read { 376 T * ret; 377 channel(T) * chan; 378 }; 379 __CFA_SELECT_GET_TYPE( chan_read(T) ); 380 381 static inline void ?{}( chan_read(T) & cr, channel(T) * chan, T * ret ) { 382 cr.chan = chan; 383 cr.ret = ret; 384 } 385 static inline chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ &chan, &ret }; return cr; } 386 387 static inline void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(*this.chan, this) { 388 __closed_remove( *chan, *ret ); 389 // if we get here then the insert succeeded 390 __make_select_node_available( node ); 391 } 392 393 static inline bool register_select( chan_read(T) & this, select_node & node ) with(*this.chan, this) { 394 lock( mutex_lock ); 395 node.extra = ret; // set .extra so that if it == 0p later in on_selected it is due to channel close 396 397 #ifdef CHAN_STATS 398 if ( !closed ) c_ops++; 399 #endif 400 401 if ( !node.park_counter ) { 402 // are we special case OR and front of cons is also special case OR 403 if ( !unlikely(closed) && !prods`isEmpty && prods`first.clause_status && !prods`first.park_counter ) { 404 if ( !__make_select_node_pending( node ) ) { 405 unlock( mutex_lock ); 406 return false; 407 } 408 409 if ( __handle_pending( prods, node ) ) { 410 __prods_handoff( *chan, *ret ); 411 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node 412 unlock( mutex_lock ); 413 return true; 414 } 415 if ( *node.clause_status == __SELECT_PENDING ) 416 __make_select_node_unsat( node ); 417 } 418 // check if we can complete operation. 
If so race to establish winner in special OR case 419 if ( count != 0 || !prods`isEmpty || unlikely(closed) ) { 420 if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering 421 unlock( mutex_lock ); 422 return false; 423 } 424 } 425 } 426 427 if ( unlikely(closed) ) { 428 unlock( mutex_lock ); 429 __handle_select_closed_read( this, node ); 430 return true; 431 } 432 433 // have to check for the zero size channel case 434 ZeroSize: if ( size == 0 && !prods`isEmpty ) { 435 if ( !__handle_waituntil_OR( prods ) ) break ZeroSize; 436 __prods_handoff( *chan, *ret ); 437 __set_avail_then_unlock( node, mutex_lock ); 438 return true; 439 } 440 441 // wait if buffer is empty, work will be completed by someone else 442 if ( count == 0 ) { 443 #ifdef CHAN_STATS 444 c_blocks++; 445 #endif 391 // our node lost the race when toggling in __pending_set_other 392 if ( *mine.clause_status != __SELECT_PENDING ) 393 return false; 394 395 // otherwise we lost the special OR race so discard node 396 remove_first( queue ); 397 } 398 return false; 399 } 400 401 void ?{}( chan_read(T) & cr, channel(T) * chan, T * ret ) { 402 cr.chan = chan; 403 cr.ret = ret; 404 } 405 chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ &chan, &ret }; return cr; } 406 407 void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(*this.chan, this) { 408 __closed_remove( *chan, *ret ); 409 // if we get here then the insert succeeded 410 __make_select_node_available( node ); 411 } 412 413 bool register_select( chan_read(T) & this, select_node & node ) with(*this.chan, this) { 414 lock( mutex_lock ); 415 node.extra = ret; // set .extra so that if it == 0p later in on_selected it is due to channel close 416 417 #ifdef CHAN_STATS 418 if ( ! closed ) c_ops++; 419 #endif 420 421 if ( ! node.park_counter ) { 422 // are we special case OR and front of cons is also special case OR 423 if ( ! unlikely(closed) && ! isEmpty( prods ) && first( prods ).clause_status && ! first( prods ).park_counter ) { 424 if ( ! __make_select_node_pending( node ) ) { 425 unlock( mutex_lock ); 426 return false; 427 } 428 429 if ( __handle_pending( prods, node ) ) { 430 __prods_handoff( *chan, *ret ); 431 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node 432 unlock( mutex_lock ); 433 return true; 434 } 435 if ( *node.clause_status == __SELECT_PENDING ) 436 __make_select_node_unsat( node ); 437 } 438 // check if we can complete operation. If so race to establish winner in special OR case 439 if ( count != 0 || ! isEmpty( prods ) || unlikely(closed) ) { 440 if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering 441 unlock( mutex_lock ); 442 return false; 443 } 444 } 445 } 446 447 if ( unlikely(closed) ) { 448 unlock( mutex_lock ); 449 __handle_select_closed_read( this, node ); 450 return true; 451 } 452 453 // have to check for the zero size channel case 454 ZeroSize: 455 if ( size == 0 && ! isEmpty( prods ) ) { 456 if ( ! 
__handle_waituntil_OR( prods ) ) break ZeroSize; 457 __prods_handoff( *chan, *ret ); 458 __set_avail_then_unlock( node, mutex_lock ); 459 return true; 460 } 461 462 // wait if buffer is empty, work will be completed by someone else 463 if ( count == 0 ) { 464 #ifdef CHAN_STATS 465 c_blocks++; 466 #endif 446 467 447 insert_last( cons, node ); 448 unlock( mutex_lock ); 449 return false; 450 } 451 452 // Remove from buffer 453 __do_remove( *chan, *ret ); 454 __set_avail_then_unlock( node, mutex_lock ); 455 return true; 456 } 457 static inline bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); } 458 static inline bool on_selected( chan_read(T) & this, select_node & node ) with(this) { 459 if ( unlikely(node.extra == 0p) ) { 460 if ( !exception_in_flight() ) __closed_remove( *chan, *ret ); // check if woken up due to closed channel 461 else return false; 462 } 463 // This is only reachable if not closed or closed exception was handled 464 return true; 465 } 466 467 // type used by select statement to capture a chan read as the selected operation that doesn't have a param to read to 468 struct chan_read_no_ret { 469 T retval; 470 chan_read( T ) c_read; 471 }; 472 __CFA_SELECT_GET_TYPE( chan_read_no_ret(T) ); 473 474 static inline void ?{}( chan_read_no_ret(T) & this, channel(T) & chan ) { 475 this.c_read{ &chan, &this.retval }; 476 } 477 478 static inline chan_read_no_ret(T) remove( channel(T) & chan ) { chan_read_no_ret(T) c_read{ chan }; return c_read; } 479 static inline bool register_select( chan_read_no_ret(T) & this, select_node & node ) { 480 this.c_read.ret = &this.retval; 481 return register_select( this.c_read, node ); 482 } 483 static inline bool unregister_select( chan_read_no_ret(T) & this, select_node & node ) { return unregister_select( this.c_read, node ); } 484 static inline bool on_selected( chan_read_no_ret(T) & this, select_node & node ) { return on_selected( this.c_read, node ); } 485 486 // type used by select statement to capture a chan write as the selected operation 487 struct chan_write { 488 T elem; 489 channel(T) * chan; 490 }; 491 __CFA_SELECT_GET_TYPE( chan_write(T) ); 492 493 static inline void ?{}( chan_write(T) & cw, channel(T) * chan, T elem ) { 494 cw.chan = chan; 495 memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) ); 496 } 497 static inline chan_write(T) ?<<?( channel(T) & chan, T elem ) { chan_write(T) cw{ &chan, elem }; return cw; } 498 static inline chan_write(T) insert( T elem, channel(T) & chan) { chan_write(T) cw{ &chan, elem }; return cw; } 499 500 static inline void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(*this.chan, this) { 501 __closed_insert( *chan, elem ); 502 // if we get here then the insert succeeded 503 __make_select_node_available( node ); 504 } 505 506 static inline bool register_select( chan_write(T) & this, select_node & node ) with(*this.chan, this) { 507 lock( mutex_lock ); 508 node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close 509 510 #ifdef CHAN_STATS 511 if ( !closed ) p_ops++; 512 #endif 513 514 // special OR case handling 515 if ( !node.park_counter ) { 516 // are we special case OR and front of cons is also special case OR 517 if ( !unlikely(closed) && !cons`isEmpty && cons`first.clause_status && !cons`first.park_counter ) { 518 if ( !__make_select_node_pending( node ) ) { 519 unlock( mutex_lock ); 520 return false; 521 } 522 523 if ( __handle_pending( cons, node ) ) { 524 
__cons_handoff( *chan, elem ); 525 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node 526 unlock( mutex_lock ); 527 return true; 528 } 529 if ( *node.clause_status == __SELECT_PENDING ) 530 __make_select_node_unsat( node ); 531 } 532 // check if we can complete operation. If so race to establish winner in special OR case 533 if ( count != size || !cons`isEmpty || unlikely(closed) ) { 534 if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering 535 unlock( mutex_lock ); 536 return false; 537 } 538 } 539 } 540 541 // if closed handle 542 if ( unlikely(closed) ) { 543 unlock( mutex_lock ); 544 __handle_select_closed_write( this, node ); 545 return true; 546 } 547 548 // handle blocked consumer case via handoff (buffer is implicitly empty) 549 ConsEmpty: if ( !cons`isEmpty ) { 550 if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty; 551 __cons_handoff( *chan, elem ); 552 __set_avail_then_unlock( node, mutex_lock ); 553 return true; 554 } 555 556 // insert node in list if buffer is full, work will be completed by someone else 557 if ( count == size ) { 558 #ifdef CHAN_STATS 559 p_blocks++; 560 #endif 561 562 insert_last( prods, node ); 563 unlock( mutex_lock ); 564 return false; 565 } // if 566 567 // otherwise carry out write either via normal insert 568 __buf_insert( *chan, elem ); 569 __set_avail_then_unlock( node, mutex_lock ); 570 return true; 571 } 572 static inline bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); } 573 574 static inline bool on_selected( chan_write(T) & this, select_node & node ) with(this) { 575 if ( unlikely(node.extra == 0p) ) { 576 if ( !exception_in_flight() ) __closed_insert( *chan, elem ); // check if woken up due to closed channel 577 else return false; 578 } 579 // This is only reachable if not closed or closed exception was handled 580 return true; 581 } 582 583 } // forall( T ) 584 585 468 insert_last( cons, node ); 469 unlock( mutex_lock ); 470 return false; 471 } 472 473 // Remove from buffer 474 __do_remove( *chan, *ret ); 475 __set_avail_then_unlock( node, mutex_lock ); 476 return true; 477 } 478 bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); } 479 bool on_selected( chan_read(T) & this, select_node & node ) with(this) { 480 if ( unlikely(node.extra == 0p) ) { 481 if ( ! 
exception_in_flight() ) __closed_remove( *chan, *ret ); // check if woken up due to closed channel 482 else return false; 483 } 484 // This is only reachable if not closed or closed exception was handled 485 return true; 486 } 487 488 void ?{}( chan_read_no_ret(T) & this, channel(T) & chan ) { 489 this.c_read{ &chan, &this.retval }; 490 } 491 492 chan_read_no_ret(T) remove( channel(T) & chan ) { chan_read_no_ret(T) c_read{ chan }; return c_read; } 493 bool register_select( chan_read_no_ret(T) & this, select_node & node ) { 494 this.c_read.ret = &this.retval; 495 return register_select( this.c_read, node ); 496 } 497 bool unregister_select( chan_read_no_ret(T) & this, select_node & node ) { return unregister_select( this.c_read, node ); } 498 bool on_selected( chan_read_no_ret(T) & this, select_node & node ) { return on_selected( this.c_read, node ); } 499 500 void ?{}( chan_write(T) & cw, channel(T) * chan, T elem ) { 501 cw.chan = chan; 502 memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) ); 503 } 504 chan_write(T) ?<<?( channel(T) & chan, T elem ) { chan_write(T) cw{ &chan, elem }; return cw; } 505 chan_write(T) insert( T elem, channel(T) & chan) { chan_write(T) cw{ &chan, elem }; return cw; } 506 507 void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(*this.chan, this) { 508 __closed_insert( *chan, elem ); 509 // if we get here then the insert succeeded 510 __make_select_node_available( node ); 511 } 512 513 bool register_select( chan_write(T) & this, select_node & node ) with(*this.chan, this) { 514 lock( mutex_lock ); 515 node.extra = &elem; // set .extra so that if it == 0p later in on_selected it is due to channel close 516 517 #ifdef CHAN_STATS 518 if ( ! closed ) p_ops++; 519 #endif 520 521 // special OR case handling 522 if ( ! node.park_counter ) { 523 // are we special case OR and front of cons is also special case OR 524 if ( ! unlikely(closed) && ! isEmpty( cons ) && first( cons ).clause_status && ! first( cons ).park_counter ) { 525 if ( ! __make_select_node_pending( node ) ) { 526 unlock( mutex_lock ); 527 return false; 528 } 529 if ( __handle_pending( cons, node ) ) { 530 __cons_handoff( *chan, elem ); 531 __make_select_node_sat( node ); // need to to mark SAT now that we know operation is done or else threads could get stuck in __mark_select_node 532 unlock( mutex_lock ); 533 return true; 534 } 535 if ( *node.clause_status == __SELECT_PENDING ) 536 __make_select_node_unsat( node ); 537 } 538 // check if we can complete operation. If so race to establish winner in special OR case 539 if ( count != size || ! isEmpty( cons ) || unlikely(closed) ) { 540 if ( ! __make_select_node_available( node ) ) { // we didn't win the race so give up on registering 541 unlock( mutex_lock ); 542 return false; 543 } 544 } 545 } 546 547 // if closed handle 548 if ( unlikely(closed) ) { 549 unlock( mutex_lock ); 550 __handle_select_closed_write( this, node ); 551 return true; 552 } 553 554 // handle blocked consumer case via handoff (buffer is implicitly empty) 555 ConsEmpty: 556 if ( ! isEmpty( cons ) ) { 557 if ( ! 
__handle_waituntil_OR( cons ) ) break ConsEmpty; 558 __cons_handoff( *chan, elem ); 559 __set_avail_then_unlock( node, mutex_lock ); 560 return true; 561 } 562 563 // insert node in list if buffer is full, work will be completed by someone else 564 if ( count == size ) { 565 #ifdef CHAN_STATS 566 p_blocks++; 567 #endif 568 569 insert_last( prods, node ); 570 unlock( mutex_lock ); 571 return false; 572 } // if 573 574 // otherwise carry out write either via normal insert 575 __buf_insert( *chan, elem ); 576 __set_avail_then_unlock( node, mutex_lock ); 577 return true; 578 } 579 bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( *this.chan, node ); } 580 581 bool on_selected( chan_write(T) & this, select_node & node ) with(this) { 582 if ( unlikely(node.extra == 0p) ) { 583 if ( ! exception_in_flight() ) __closed_insert( *chan, elem ); // check if woken up due to closed channel 584 else return false; 585 } 586 // This is only reachable if not closed or closed exception was handled 587 return true; 588 } 589 } // distribution 590 591 -
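The register_select / unregister_select / on_selected triple above is what lets channel operations appear as waituntil clauses: register_select races to publish the clause, and on_selected finishes (or aborts) the operation after wake-up. A rough call-site sketch; the clause syntax and the << operators are assumed from the ?<<? constructors in the hunk, and process is illustrative:

    channel( int ) in{ 8 }, out{ 8 };
    int x;
    waituntil( x << in ) {               // registers a chan_read(int) select_node
        process( x );                    // hypothetical consumer
    } or waituntil( out << 42 ) {        // registers a chan_write(int) select_node
        // write completed by handoff to a blocked consumer or by buffer insert
    }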
libcfa/src/concurrency/cofor.hfa
rf85de47 r65bd3c2 33 33 34 34 void main( cofor_runner & this ) with(this) { 35 while ( ! done || !items`isEmpty) {35 while ( ! done || ! isEmpty( items ) ) { 36 36 lock( mutex_lock ); 37 runner_node * node = & try_pop_front( items );37 runner_node * node = &remove_first( items ); 38 38 unlock( mutex_lock ); 39 if ( ! node )39 if ( ! node ) 40 40 continue; 41 41 func( node->value ); -
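remove_first returns a reference, and when the list is drained underneath the runner that reference aliases the null pointer, so a testable pointer is recovered by taking its address. The guard idiom, isolated from the loop above:

    runner_node * node = &remove_first( items );   // null reference if items was empty
    if ( ! node ) continue;                        // nothing dequeued; retest done/isEmpty
    func( node->value );                           // node is non-null here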
libcfa/src/concurrency/coroutine.cfa
rf85de47 r65bd3c2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Sep 18 21:47:12 202313 // Update Count : 2512 // Last Modified On : Fri Apr 25 06:48:19 2025 13 // Update Count : 31 14 14 // 15 15 … … 82 82 // helper for popping from coroutine's ehm buffer 83 83 static nonlocal_exception * pop_ehm_head( coroutine$ * this ) { 84 85 86 87 84 lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 ); 85 nonlocal_exception * nl_ex = pop_head( this->ehm_state.ehm_buffer ); 86 unlock( this->ehm_state.buffer_lock ); 87 return nl_ex; 88 88 } 89 89 … … 97 97 98 98 void __stack_prepare( __stack_info_t * this, size_t create_size ); 99 static void __stack_clean 99 static void __stack_clean( __stack_info_t * this ); 100 100 101 101 //----------------------------------------------------------------------------- … … 105 105 106 106 // Did we get a piece of storage ? 107 if ( this.storage || storageSize != 0) {107 if ( this.storage || storageSize != 0 ) { 108 108 // We either got a piece of storage or the user asked for a specific size 109 109 // Immediately create the stack … … 128 128 state = Start; 129 129 starter = 0p; 130 last = 0p;130 this.last = 0p; 131 131 cancellation = 0p; 132 ehm_state.ehm_buffer{}; 133 ehm_state.buffer_lock{}; 134 ehm_state.ehm_enabled = false; 135 } 136 137 void ^?{}(coroutine$& this) libcfa_public { 138 // handle any leftover pending non-local exceptions 139 nonlocal_exception * nl_ex = pop_ehm_head( &this ); 140 unsigned unhandled_ex = 0; 141 142 // if any leftover exceptions handle 143 while ( nl_ex != 0p ){ 144 unhandled_ex++; 145 free( nl_ex->the_exception ); 146 free( nl_ex ); 147 nl_ex = pop_ehm_head( &this ); 148 } 149 150 #ifdef __CFA_DEBUG__ 151 if ( unhandled_ex > 0 ) 152 printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex ); 153 #endif 154 155 if(this.state != Halted && this.state != Start && this.state != Primed) { 132 ehm_state.ehm_buffer{}; 133 ehm_state.buffer_lock{}; 134 ehm_state.ehm_enabled = false; 135 } 136 137 void ^?{}( coroutine$ & this ) libcfa_public { 138 // handle any leftover pending non-local exceptions 139 nonlocal_exception * nl_ex = pop_ehm_head( &this ); 140 unsigned unhandled_ex = 0; 141 142 // if any leftover exceptions handle 143 for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( &this ) ) { 144 unhandled_ex++; 145 free( nl_ex->the_exception ); 146 free( nl_ex ); 147 } 148 149 #ifdef __CFA_DEBUG__ 150 if ( unhandled_ex > 0 ) 151 printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex ); 152 #endif 153 154 if ( this.state != Halted && this.state != Start && this.state != Primed ) { 156 155 coroutine$ * src = active_coroutine(); 157 156 coroutine$ * dst = &this; … … 174 173 // Part of the Public API 175 174 // Not inline since only ever called once per coroutine 176 forall( T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })177 void prime( T& cor) libcfa_public {178 coroutine$ * this = get_coroutine(cor);179 assert( this->state == Start);175 forall( T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); } ) 176 void prime( T & cor ) libcfa_public { 177 coroutine$ * this = get_coroutine(cor); 178 assert( this->state == Start ); 180 179 181 180 this->state = Primed; 182 resume( cor);181 resume( cor ); 183 182 } 184 183 185 184 static [void *, size_t] __stack_alloc( size_t storageSize ) { 186 185 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); 
// minimum alignment 187 assert( __page_size != 0l);186 assert( __page_size != 0l ); 188 187 size_t size = libCeiling( storageSize, 16 ) + stack_data_size; 189 size = ceiling( size, __page_size);188 size = ceiling( size, __page_size ); 190 189 191 190 // If we are running debug, we also need to allocate a guardpage to catch stack overflows. … … 193 192 #if CFA_COROUTINE_USE_MMAP 194 193 storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0); 195 if (storage == ((void*)-1)) {194 if (storage == ((void*)-1)) { 196 195 abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) ); 197 196 } … … 227 226 size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t); 228 227 storage = (void *)(((intptr_t)storage) - __page_size); 229 if (munmap(storage, size + __page_size) == -1) {228 if (munmap(storage, size + __page_size) == -1) { 230 229 abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) ); 231 230 } … … 248 247 void * storage; 249 248 size_t size; 250 if ( ! this->storage ) {249 if ( ! this->storage ) { 251 250 userStack = false; 252 251 [storage, size] = __stack_alloc( create_size ); … … 302 301 athrd->corctx_flag = false; 303 302 304 if (cor->state == Primed) {303 if (cor->state == Primed) { 305 304 __cfactx_suspend(); 306 305 } … … 317 316 318 317 void defaultResumeAtHandler( exception_t * except ) { 319 320 318 __cfaehm_allocate_exception( except ); 319 __cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler ); 321 320 } 322 321 … … 328 327 329 328 bool poll( coroutine$ * cor ) libcfa_public { 330 331 332 333 334 335 336 while ( nl_ex != 0p ){329 nonlocal_exception * nl_ex = pop_ehm_head( cor ); 330 331 // if no exceptions return false 332 if ( nl_ex == 0p ) return false; 333 334 // otherwise loop and throwResume all pending exceptions 335 for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( cor ) ) { 337 336 ehm_cleanup ex_holder{ nl_ex->the_exception }; 338 free( nl_ex ); 339 __cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler ); 340 341 nl_ex = pop_ehm_head( cor ); 342 } 343 344 return true; 337 free( nl_ex ); 338 __cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler ); 339 } 340 341 return true; 345 342 } 346 343 … … 354 351 // user facing ehm operations 355 352 forall(T & | is_coroutine(T)) { 356 357 358 359 360 361 362 363 364 365 366 367 353 // enable/disable non-local exceptions 354 void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; } 355 void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; } 356 357 // poll for non-local exceptions 358 bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); } 359 360 // poll iff nonlocal ehm is enabled 361 bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? 
poll( cor ) : false; } 362 363 coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; } 364 coroutine$ * first_resumer( T & cor ) libcfa_public { return get_coroutine( cor )->starter; } 368 365 } 369 366 … … 371 368 forall(exceptT *, T & | ehm_resume_at( exceptT, T )) 372 369 void resumeAt( T & receiver, exceptT & ex ) libcfa_public { 373 374 375 376 377 378 379 380 370 coroutine$ * cor = get_coroutine( receiver ); 371 nonlocal_exception * nl_ex = alloc(); 372 exceptT * ex_copy = alloc(); 373 memcpy( ex_copy, &ex, sizeof(exceptT) ); 374 (*nl_ex){ (exception_t *)ex_copy }; 375 lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 ); 376 append( cor->ehm_state.ehm_buffer, nl_ex ); 377 unlock( cor->ehm_state.buffer_lock ); 381 378 } 382 379 383 380 forall(exceptT * | { void $throwResume(exceptT &); }) 384 381 void resumeAt( coroutine$ * receiver, exceptT & ex ) libcfa_public { 385 386 387 388 389 390 391 382 nonlocal_exception * nl_ex = alloc(); 383 exceptT * ex_copy = alloc(); 384 memcpy( ex_copy, &ex, sizeof(exceptT) ); 385 (*nl_ex){ (exception_t *)ex_copy }; 386 lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 ); 387 append( receiver->ehm_state.ehm_buffer, nl_ex ); 388 unlock( receiver->ehm_state.buffer_lock ); 392 389 } 393 390 -
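enable_ehm, resumeAt, and checked_poll above combine into mailbox-style non-local exceptions: senders copy an exception into the coroutine's locked buffer, and the coroutine raises the queued exceptions as resumptions at its next poll. A minimal sketch of the intended flow; Worker and Interrupt are hypothetical, and the exception-declaration and catchResume syntax is assumed rather than taken from this changeset:

    exception Interrupt {};
    coroutine Worker {};
    void main( Worker & w ) {
        enable_ehm( w );                         // opt in to non-local delivery
        for () {
            try {
                checked_poll( w );               // throwResume any queued exceptions
            } catchResume( Interrupt * ) {
                // react to the asynchronous interrupt
            }
            suspend;                             // yield back to the resumer
        }
    }
    // sender side:  Interrupt ex;  resumeAt( w, ex );  // copied into w's buffer, raised at next poll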
libcfa/src/concurrency/coroutine.hfa
rf85de47 r65bd3c2 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Feb 2 11:31:42 202313 // Update Count : 1 312 // Last Modified On : Fri Apr 25 06:52:04 2025 13 // Update Count : 15 14 14 // 15 15 … … 26 26 nonlocal_exception * next; 27 27 }; 28 static inline void ?{} ( nonlocal_exception & this, exception_t * ex ) with(this) { 28 29 static inline void ?{}( nonlocal_exception & this, exception_t * ex ) with(this) { 29 30 the_exception = ex; 30 next = 0p;31 this.next = 0p; 31 32 } 32 33 … … 66 67 // void ^?{}( coStack_t & this ); 67 68 68 void 69 void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ); 69 70 void ^?{}( coroutine$ & this ); 70 71 -
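The constructor above now writes this.next instead of the bare next even though with(this) is in force; plausibly this avoids ambiguity with the free routine next( thread$ * ) renamed in invoke.h below. The hazard in miniature, with a hypothetical type:

    struct Link { Link * next; };
    static inline Link * & next( Link & l ) { return l.next; }  // free routine sharing the field name
    void ?{}( Link & this ) with( this ) {
        this.next = 0p;      // qualified: a bare 'next' now has two candidate resolutions
    }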
libcfa/src/concurrency/future.hfa
rf85de47 r65bd3c2 10 10 // Created On : Wed Jan 06 17:33:18 2021 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Mar 2 14:45:56202513 // Update Count : 1912 // Last Modified On : Wed Apr 23 22:41:10 2025 13 // Update Count : 22 14 14 // 15 15 … … 63 63 void reset( future(T) & this ) with(this) { 64 64 lock( lock ); 65 if ( ! waiters`isEmpty)65 if ( ! isEmpty( waiters ) ) 66 66 abort("Attempting to reset a future with blocked waiters"); 67 67 state = FUTURE_EMPTY; … … 82 82 83 83 bool fulfil$( future(T) & this ) with(this) { // helper 84 bool ret_val = ! waiters`isEmpty;84 bool ret_val = ! isEmpty( waiters ); 85 85 state = FUTURE_FULFILLED; 86 while ( ! waiters`isEmpty) {86 while ( ! isEmpty( waiters ) ) { 87 87 if ( !__handle_waituntil_OR( waiters ) ) // handle special waituntil OR case 88 88 break; // if handle_OR returns false then waiters is empty so break 89 select_node &s = try_pop_front( waiters );89 select_node &s = remove_first( waiters ); 90 90 91 91 if ( s.clause_status == 0p ) // poke in result so that woken threads do not need to reacquire any locks … … 208 208 209 209 bool unregister_select( future(T) & this, select_node & s ) with(this) { 210 if ( ! s`isListed) return false;211 lock( lock ); 212 if ( s`isListed) remove( s );210 if ( ! isListed( s ) ) return false; 211 lock( lock ); 212 if ( isListed( s ) ) remove( s ); 213 213 unlock( lock ); 214 214 return false; -
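reset above aborts when any waiter is still blocked, so a future is only recycled between fulfilment and the next round of waits. A usage sketch; the public fulfil/wait names are assumed wrappers around the fulfil$ helper shown, not verified against the full header:

    future( int ) f;
    // producer:  fulfil( f, 42 );     // assumed public wrapper over fulfil$
    // consumer:  int v = wait( f );   // assumed; blocks until FUTURE_FULFILLED
    reset( f );                        // safe only once no waiters remain, else abort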
libcfa/src/concurrency/invoke.h
rf85de47 r65bd3c2 10 10 // Created On : Tue Jan 17 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Wed A ug 30 21:27:51 202313 // Update Count : 6 012 // Last Modified On : Wed Apr 23 15:27:18 2025 13 // Update Count : 61 14 14 // 15 15 … … 259 259 } 260 260 261 static inline thread$ * volatile & ?`next( thread$ * this ) {261 static inline thread$ * volatile & next( thread$ * this ) { 262 262 return this->user_link.next; 263 263 } -
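With ?`next renamed to a plain routine, walking a user-linked chain of threads reads as ordinary calls; next returns a reference to the link field, so it is also assignable. A sketch, where head and visit are illustrative:

    thread$ * t = head;          // illustrative chain head
    while ( t ) {
        visit( t );              // hypothetical per-thread work
        t = next( t );           // was: t = t`next
    }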
libcfa/src/concurrency/io.cfa
rf85de47 r65bd3c2 95 95 static inline void __post(oneshot & this, bool kernel, unpark_hint hint) { 96 96 thread$ * t = post( this, false ); 97 if (kernel) __kernel_unpark( t, hint );97 if (kernel) __kernel_unpark( t, hint ); 98 98 else unpark( t, hint ); 99 99 } … … 108 108 // do the system call in a loop, repeat on interrupts 109 109 ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, min_comp, flags, (sigset_t *)0p, _NSIG / 8); 110 if ( ret < 0 ) {110 if ( ret < 0 ) { 111 111 switch((int)errno) { 112 112 case EINTR: … … 154 154 const __u32 tail = *ctx->cq.tail; 155 155 156 if (head == tail) return false;156 if (head == tail) return false; 157 157 } 158 158 159 159 // try a simple spinlock acquire, it's likely there are completions to drain 160 if (!__atomic_try_acquire(&ctx->cq.try_lock)) {160 if ( ! __atomic_try_acquire(&ctx->cq.try_lock)) { 161 161 // some other processor already has it 162 162 __STATS__( false, io.calls.locked++; ) … … 214 214 215 215 // we finished draining the completions... unless the ring buffer was full and there are more secret completions in the kernel. 216 if (likely(count < num)) break;216 if (likely(count < num)) break; 217 217 218 218 // the ring buffer was full, there could be more stuff in the kernel. … … 243 243 244 244 // if submitting must be submitted, do the system call 245 if (ctx.sq.to_submit != 0) {245 if (ctx.sq.to_submit != 0) { 246 246 ioring_syscsll(ctx, 0, 0); 247 247 } … … 278 278 // only help once every other time 279 279 // pick a target when not helping 280 if (proc->io.target == UINT_MAX) {280 if (proc->io.target == UINT_MAX) { 281 281 uint64_t chaos = __tls_rand(); 282 282 // choose who to help and whether to accept helping far processors … … 285 285 286 286 // if the processor is on the same cache line or is lucky ( 3 out of 256 odds ) help it 287 if (ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) {287 if (ext < 3 || __atomic_load_n(&caches[other / __shard_factor.io].id, __ATOMIC_RELAXED) == this_cache) { 288 288 proc->io.target = other; 289 289 } … … 294 294 /* paranoid */ verify( io.tscs[target].t.tv != ULLONG_MAX ); 295 295 // make sure the target hasn't stopped existing since last time 296 HELP: if (target < ctxs_count) {296 HELP: if (target < ctxs_count) { 297 297 // calculate it's age and how young it could be before we give up on helping 298 298 const __readyQ_avg_t cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io, false); … … 300 300 __cfadbg_print_safe(io, "Kernel I/O: Help attempt on %u from %u, age %'llu vs cutoff %'llu, %s\n", target, ctx->cq.id, age, cutoff, age > cutoff ? "yes" : "no"); 301 301 // is the target older than the cutoff, recall 0 is oldest and bigger ints are younger 302 if (age <= cutoff) break HELP;302 if (age <= cutoff) break HELP; 303 303 304 304 // attempt to help the submission side … … 306 306 307 307 // attempt to help the completion side 308 if (!try_acquire(io.data[target])) break HELP; // already acquire no help needed308 if ( ! try_acquire(io.data[target])) break HELP; // already acquire no help needed 309 309 310 310 // actually help 311 if (!__cfa_do_drain( io.data[target], cltr )) break HELP;311 if ( ! 
__cfa_do_drain( io.data[target], cltr )) break HELP; 312 312 313 313 // track we did help someone … … 322 322 323 323 // Drain the local queue 324 if (try_acquire( proc->io.ctx )) {324 if (try_acquire( proc->io.ctx )) { 325 325 local = __cfa_do_drain( proc->io.ctx, cltr ); 326 326 } … … 390 390 391 391 // If we don't have enough sqes, fail 392 if ((ftail - fhead) < want) { return false; }392 if ((ftail - fhead) < want) { return false; } 393 393 394 394 // copy all the indexes we want from the available list … … 422 422 423 423 // We can proceed to the fast path 424 if ( __alloc(ctx, idxs, want) ) {424 if ( __alloc(ctx, idxs, want) ) { 425 425 // Allocation was successful 426 426 __STATS__( true, io.alloc.fast += 1; ) … … 456 456 // barebones logic to submit a group of sqes 457 457 static inline void __submit_only( struct io_context$ * ctx, __u32 idxs[], __u32 have, bool lock) { 458 if (!lock)458 if ( ! lock) 459 459 lock( ctx->ext_sq.lock __cfaabi_dbg_ctx2 ); 460 460 // We can proceed to the fast path … … 478 478 __atomic_store_n(&ctx->proc->io.dirty , true, __ATOMIC_RELAXED); 479 479 480 if (!lock)480 if ( ! lock) 481 481 unlock( ctx->ext_sq.lock ); 482 482 } … … 487 487 __submit_only(ctx, idxs, have, false); 488 488 489 if (sq.to_submit > 30) {489 if (sq.to_submit > 30) { 490 490 __tls_stats()->io.flush.full++; 491 491 __cfa_io_flush( ctx->proc ); 492 492 } 493 if (!lazy) {493 if ( ! lazy ) { 494 494 __tls_stats()->io.flush.eager++; 495 495 __cfa_io_flush( ctx->proc ); … … 503 503 504 504 disable_interrupts(); 505 __STATS__( true, if (!lazy) io.submit.eagr += 1; )505 __STATS__( true, if ( ! lazy ) io.submit.eagr += 1; ) 506 506 struct processor * proc = __cfaabi_tls.this_processor; 507 507 io_context$ * ctx = proc->io.ctx; … … 510 510 511 511 // Can we proceed to the fast path 512 if ( ctx == inctx ) // We have the right instance?512 if ( ctx == inctx ) // We have the right instance? 513 513 { 514 514 // yes! fast submit … … 564 564 __u32 count = chead - phead; 565 565 566 if (count == 0) {566 if (count == 0) { 567 567 return 0; 568 568 } … … 594 594 lock( queue.lock __cfaabi_dbg_ctx2 ); 595 595 { 596 was_empty = queue.queue`isEmpty;596 was_empty = isEmpty( queue.queue ); 597 597 598 598 // Add our request to the list … … 632 632 // notify the arbiter that new allocations are available 633 633 static void __ioarbiter_notify( io_arbiter$ & this, io_context$ * ctx ) { 634 /* paranoid */ verify( ! this.pending.queue`isEmpty);634 /* paranoid */ verify( ! isEmpty( this.pending.queue ) ); 635 635 /* paranoid */ verify( __preemption_enabled() ); 636 636 … … 642 642 // as long as there are pending allocations try to satisfy them 643 643 // for simplicity do it in FIFO order 644 while( ! this.pending.queue`isEmpty) {644 while( ! 
isEmpty( this.pending.queue ) ) { 645 645 // get first pending allocs 646 646 __u32 have = ctx->sq.free_ring.tail - ctx->sq.free_ring.head; 647 __pending_alloc & pa = (__pending_alloc&)( this.pending.queue`first);647 __pending_alloc & pa = (__pending_alloc&)( first( this.pending.queue )); 648 648 649 649 // check if we have enough to satisfy the request 650 if ( have > pa.want ) goto DONE;650 if ( have > pa.want ) goto DONE; 651 651 652 652 // if there are enough allocations it means we can drop the request 653 try_pop_front( this.pending.queue );653 remove_first( this.pending.queue ); 654 654 655 655 /* paranoid */__attribute__((unused)) bool ret = … … 676 676 // short hand to avoid the mutual exclusion of the pending is empty regardless 677 677 static void __ioarbiter_notify( io_context$ & ctx ) { 678 if (empty( ctx.arbiter->pending )) return;678 if (empty( ctx.arbiter->pending )) return; 679 679 __ioarbiter_notify( *ctx.arbiter, &ctx ); 680 680 } … … 700 700 // if this is the first to be enqueued, signal the processor in an attempt to speed up flushing 701 701 // if it's not the first enqueue, a signal is already in transit 702 if ( we ) {702 if ( we ) { 703 703 sigval_t value = { PREEMPT_IO }; 704 704 __cfaabi_pthread_sigqueue(ctx->proc->kernel_thread, SIGUSR1, value); … … 716 716 static void __ioarbiter_flush( io_context$ & ctx, bool kernel ) { 717 717 // if there are no external operations just return 718 if (empty( ctx.ext_sq )) return;718 if ( empty( ctx.ext_sq ) ) return; 719 719 720 720 // stats and logs … … 727 727 // pop each operation one at a time. 728 728 // There is no wait morphing because of the io sq ring 729 while( ! ctx.ext_sq.queue`isEmpty) {729 while( ! isEmpty( ctx.ext_sq.queue ) ) { 730 730 // drop the element from the queue 731 __external_io & ei = (__external_io&) try_pop_front( ctx.ext_sq.queue );731 __external_io & ei = (__external_io&)remove_first( ctx.ext_sq.queue ); 732 732 733 733 // submit it -
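The external-submission path above relies on a first-enqueuer-signals rule: only the thread that finds the queue empty sends SIGUSR1, because any later enqueuer knows a signal is already in transit. The pattern condensed from the two hunks (fields and names as in the diff):

    bool was_empty;
    lock( queue.lock __cfaabi_dbg_ctx2 );
    was_empty = isEmpty( queue.queue );
    insert_last( queue.queue, node );      // add our request
    unlock( queue.lock );
    if ( was_empty ) {                     // otherwise a signal is already in flight
        sigval_t value = { PREEMPT_IO };
        __cfaabi_pthread_sigqueue( ctx->proc->kernel_thread, SIGUSR1, value );
    }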
libcfa/src/concurrency/kernel.cfa
rf85de47 r65bd3c2 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 9 08:42:05 202313 // Update Count : 7712 // Last Modified On : Fri Apr 25 07:02:42 2025 13 // Update Count : 82 14 14 // 15 15 … … 45 45 #pragma GCC diagnostic pop 46 46 47 #if ! defined(__CFA_NO_STATISTICS__)47 #if ! defined(__CFA_NO_STATISTICS__) 48 48 #define __STATS_DEF( ...) __VA_ARGS__ 49 49 #else … … 158 158 159 159 __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this); 160 #if ! defined(__CFA_NO_STATISTICS__)161 if ( this->print_halts ) {160 #if ! defined(__CFA_NO_STATISTICS__) 161 if ( this->print_halts ) { 162 162 __cfaabi_bits_print_safe( STDOUT_FILENO, "Processor : %d - %s (%p)\n", this->unique_id, this->name, (void*)this); 163 163 } … … 169 169 170 170 // if we need to run some special setup, now is the time to do it. 171 if (this->init.thrd) {171 if (this->init.thrd) { 172 172 this->init.thrd->curr_cluster = this->cltr; 173 173 __run_thread(this, this->init.thrd); … … 185 185 readyThread = __next_thread( this->cltr ); 186 186 187 if ( !readyThread ) {187 if ( ! readyThread ) { 188 188 // there is no point in holding submissions if we are idle 189 189 __IO_STATS__(true, io.flush.idle++; ) … … 196 196 } 197 197 198 if ( !readyThread ) for(5) {198 if ( ! readyThread ) for(5) { 199 199 readyThread = __next_thread_slow( this->cltr ); 200 200 201 if ( readyThread ) break;201 if ( readyThread ) break; 202 202 203 203 // It's unlikely we still I/O to submit, but the arbiter could … … 210 210 211 211 HALT: 212 if ( !readyThread ) {212 if ( ! readyThread ) { 213 213 // Don't block if we are done 214 if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;214 if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 215 215 216 216 // Push self to idle stack 217 if (!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;217 if ( ! mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP; 218 218 219 219 // Confirm the ready-queue is empty 220 220 readyThread = __next_thread_search( this->cltr ); 221 if ( readyThread ) {221 if ( readyThread ) { 222 222 // A thread was found, cancel the halt 223 223 mark_awake(this->cltr->procs, * this); … … 247 247 248 248 // Are we done? 249 if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;250 251 if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && !__atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) {249 if ( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP; 250 251 if (__atomic_load_n(&this->io.pending, __ATOMIC_RELAXED) && ! __atomic_load_n(&this->io.dirty, __ATOMIC_RELAXED)) { 252 252 __IO_STATS__(true, io.flush.dirty++; ) 253 253 __cfa_io_flush( this ); … … 263 263 post( this->terminated ); 264 264 265 if (this == mainProcessor) {265 if (this == mainProcessor) { 266 266 // HACK : the coroutine context switch expects this_thread to be set 267 267 // and it make sense for it to be set in all other cases except here … … 294 294 295 295 // Actually run the thread 296 RUNNING: while(true) { 296 RUNNING: 297 while( true ) { 297 298 thrd_dst->preempted = __NO_PREEMPTION; 298 299 … … 339 340 // In case 2, we lost the race so we now own the thread. 
340 341 341 if (unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {342 if (unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 342 343 // Reset the this_thread now that we know 343 344 // the state isn't active anymore … … 349 350 } 350 351 351 if (unlikely(thrd_dst->state == Halting)) {352 if (unlikely(thrd_dst->state == Halting)) { 352 353 // Reset the this_thread now that we know 353 354 // the state isn't active anymore … … 418 419 } 419 420 420 #if ! defined(__CFA_NO_STATISTICS__)421 #if ! defined(__CFA_NO_STATISTICS__) 421 422 /* paranoid */ verify( thrd_src->last_proc != 0p ); 422 if (thrd_src->last_proc != kernelTLS().this_processor) {423 if (thrd_src->last_proc != kernelTLS().this_processor) { 423 424 __tls_stats()->ready.threads.migration++; 424 425 } … … 440 441 /* paranoid */ verify( thrd->curr_cluster ); 441 442 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) 442 /* paranoid */ if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,443 /* paranoid */ if ( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, 443 444 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted ); 444 /* paranoid */ if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active,445 /* paranoid */ if ( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active, 445 446 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted ); 446 447 /* paranoid */ #endif … … 463 464 __wake_one( cl ); 464 465 465 #if ! defined(__CFA_NO_STATISTICS__)466 if ( kernelTLS().this_stats ) {466 #if ! defined(__CFA_NO_STATISTICS__) 467 if ( kernelTLS().this_stats ) { 467 468 __tls_stats()->ready.threads.threads++; 468 if (outside) {469 if (outside) { 469 470 __tls_stats()->ready.threads.extunpark++; 470 471 } … … 542 543 /* paranoid */ verify( ready_schedule_islocked()); 543 544 544 if ( !thrd ) return;545 546 if (__must_unpark(thrd)) {545 if ( ! thrd ) return; 546 547 if (__must_unpark(thrd)) { 547 548 // Wake lost the race, 548 549 __schedule_thread( thrd, hint ); … … 554 555 555 556 void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public { 556 if ( !thrd ) return;557 558 if (__must_unpark(thrd)) {557 if ( ! thrd ) return; 558 559 if (__must_unpark(thrd)) { 559 560 disable_interrupts(); 560 561 // Wake lost the race, … … 592 593 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd ); 593 594 594 if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); }595 if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); }596 if ( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); }595 if ( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); } 596 if ( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); } 597 if ( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); } 597 598 598 599 thrd->state = Halting; … … 618 619 // If that is the case, abandon the preemption. 
619 620 bool preempted = false; 620 if (thrd->rdy_link.next == 0p) {621 if (thrd->rdy_link.next == 0p) { 621 622 preempted = true; 622 623 thrd->preempted = reason; … … 641 642 642 643 // If no one is sleeping: we are done 643 if ( fdp == 0p ) return;644 if ( fdp == 0p ) return; 644 645 645 646 int fd = 1; 646 if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) {647 if ( __atomic_load_n(&fdp->sem, __ATOMIC_SEQ_CST) != 1 ) { 647 648 fd = __atomic_exchange_n(&fdp->sem, 1, __ATOMIC_RELAXED); 648 649 } … … 652 653 case 0: 653 654 // If the processor isn't ready to sleep then the exchange will already wake it up 654 #if ! defined(__CFA_NO_STATISTICS__)655 if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++;655 #if ! defined(__CFA_NO_STATISTICS__) 656 if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.early++; 656 657 } else { __atomic_fetch_add(&this->stats->ready.sleep.early, 1, __ATOMIC_RELAXED); } 657 658 #endif … … 659 660 case 1: 660 661 // If someone else already said they will wake them: we are done 661 #if ! defined(__CFA_NO_STATISTICS__)662 if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++;662 #if ! defined(__CFA_NO_STATISTICS__) 663 if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.seen++; 663 664 } else { __atomic_fetch_add(&this->stats->ready.sleep.seen, 1, __ATOMIC_RELAXED); } 664 665 #endif … … 670 671 /* paranoid */ verifyf( ret == 0, "Expected return to be 0, was %d\n", ret ); 671 672 672 #if ! defined(__CFA_NO_STATISTICS__)673 if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++;673 #if ! defined(__CFA_NO_STATISTICS__) 674 if ( kernelTLS().this_stats ) { __tls_stats()->ready.sleep.wakes++; 674 675 } else { __atomic_fetch_add(&this->stats->ready.sleep.wakes, 1, __ATOMIC_RELAXED); } 675 676 #endif … … 710 711 711 712 // Someone already told us to wake-up! No time for a nap. 712 if (expected == 1) { return; }713 if (expected == 1) { return; } 713 714 714 715 // Try to mark that we are going to sleep 715 if (__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) {716 if (__atomic_compare_exchange_n(&this->idle_wctx.sem, &expected, this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST) ) { 716 717 // Every one agreed, taking a nap 717 718 break; … … 720 721 721 722 722 #if ! defined(__CFA_NO_STATISTICS__)723 if (this->print_halts) {723 #if ! defined(__CFA_NO_STATISTICS__) 724 if (this->print_halts) { 724 725 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); 725 726 } … … 731 732 eventfd_t val; 732 733 ssize_t ret = read( this->idle_wctx.evfd, &val, sizeof(val) ); 733 if (ret < 0) {734 if (ret < 0) { 734 735 switch((int)errno) { 735 736 case EAGAIN: … … 746 747 } 747 748 748 #if ! defined(__CFA_NO_STATISTICS__)749 if (this->print_halts) {749 #if ! defined(__CFA_NO_STATISTICS__) 750 if (this->print_halts) { 750 751 __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); 751 752 } … … 759 760 760 761 /* paranoid */ verify( ! __preemption_enabled() ); 761 if (!try_lock( this )) return false;762 if ( ! try_lock( this )) return false; 762 763 this.idle++; 763 764 /* paranoid */ verify( this.idle <= this.total ); … … 784 785 // update the pointer to the head wait context 785 786 struct __fd_waitctx * wctx = 0; 786 if (!this.idles`isEmpty) wctx = &this.idles`first.idle_wctx;787 if ( ! isEmpty( this.idles )) wctx = &first( this. 
idles ).idle_wctx; 787 788 __atomic_store_n(&this.fdw, wctx, __ATOMIC_SEQ_CST); 788 789 } … … 798 799 thread$ * thrd = __cfaabi_tls.this_thread; 799 800 800 if (thrd) {801 if (thrd) { 801 802 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd ); 802 803 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); … … 847 848 //----------------------------------------------------------------------------- 848 849 // Statistics 849 #if ! defined(__CFA_NO_STATISTICS__)850 #if ! defined(__CFA_NO_STATISTICS__) 850 851 void print_halts( processor & this ) libcfa_public { 851 852 this.print_halts = true; … … 855 856 /* paranoid */ verify( cltr->stats ); 856 857 857 processor * it = & list`first;858 processor * it = &first( list ); 858 859 for(unsigned i = 0; i < count; i++) { 859 860 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count); … … 861 862 // __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it ); 862 863 __tally_stats( cltr->stats, it->local_data->this_stats ); 863 it = & (*it)`next;864 it = &next( *it ); 864 865 } 865 866 } -
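The idle handshake above funnels through idle_wctx.sem: 0 means awake, 1 means a wake-up is already promised, and any other value is the sleeper's eventfd. Both sides condensed into a sketch (the waker's write is assumed to be eventfd_write, matching the read on the sleeping side):

    // sleeper:
    int expected = this->idle_wctx.sem;
    if ( expected == 1 ) return;                    // wake-up already promised
    if ( __atomic_compare_exchange_n( &this->idle_wctx.sem, &expected,
            this->idle_wctx.evfd, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
        eventfd_t val;
        read( this->idle_wctx.evfd, &val, sizeof(val) );   // nap until written
    }

    // waker:
    int fd = __atomic_exchange_n( &fdp->sem, 1, __ATOMIC_RELAXED );
    if ( fd != 0 && fd != 1 )                       // 0: sleeper not committed, 1: already promised
        eventfd_write( fd, 1 );                     // genuinely asleep, kick the eventfd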
libcfa/src/concurrency/kernel/cluster.cfa
rf85de47 r65bd3c2 234 234 235 235 static void assign_list(unsigned & valrq, unsigned & valio, dlist(struct processor) & list, unsigned count) { 236 struct processor * it = & list`first;236 struct processor * it = &first( list ); 237 237 for(unsigned i = 0; i < count; i++) { 238 238 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count); … … 245 245 valio += __shard_factor.io; 246 246 #endif 247 it = & (*it)`next;247 it = &next( *it ); 248 248 } 249 249 } … … 258 258 #if defined(CFA_HAVE_LINUX_IO_URING_H) 259 259 static void assign_io(io_context$ ** data, size_t count, dlist(struct processor) & list) { 260 struct processor * it = & list`first;260 struct processor * it = &first( list ); 261 261 while(it) { 262 262 /* paranoid */ verifyf( it, "Unexpected null iterator\n"); 263 263 /* paranoid */ verifyf( it->io.ctx->cq.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count); 264 264 data[it->io.ctx->cq.id] = it->io.ctx; 265 it = & (*it)`next;265 it = &next( *it ); 266 266 } 267 267 } -
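first and next return references into the intrusive list, so the walk takes their addresses; past the tail (or on an empty list) the reference aliases the null pointer and the loop terminates. The idiom shared by both hunks above:

    struct processor * it = &first( list );   // null when the list is empty
    while ( it ) {
        // ... visit *it ...
        it = &next( *it );                     // null reference past the tail
    }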
libcfa/src/concurrency/kernel/private.hfa
rf85de47 r65bd3c2 10 10 // Created On : Mon Feb 13 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Mar 2 16:04:46 202313 // Update Count : 1 112 // Last Modified On : Mon Apr 21 18:08:48 2025 13 // Update Count : 12 14 14 // 15 15 … … 287 287 static inline [unsigned, uint_fast32_t] ready_mutate_register() { 288 288 unsigned id = register_proc_id(); 289 uint_fast32_t last = ready_mutate_lock(); 290 return [id, last]; 289 return [id, ready_mutate_lock()]; 291 290 } 292 291 -
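ready_mutate_register above folds the lock acquisition into CFA's tuple return while still evaluating register_proc_id first, preserving the original ordering. A caller-side sketch of consuming the tuple:

    unsigned id;  uint_fast32_t last;
    [ id, last ] = ready_mutate_register();    // multiple assignment from the tuple return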
libcfa/src/concurrency/kernel/startup.cfa
rf85de47 r65bd3c2 69 69 //----------------------------------------------------------------------------- 70 70 // Start and stop routine for the kernel, declared first to make sure they run first 71 static void __kernel_startup 72 static void __kernel_shutdown(void) __attribute__(( destructor 71 static void __kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 72 static void __kernel_shutdown(void) __attribute__(( destructor( STARTUP_PRIORITY_KERNEL ) )); 73 73 74 74 //----------------------------------------------------------------------------- … … 78 78 static void * __invoke_processor(void * arg); 79 79 static void __kernel_first_resume( processor * this ); 80 static void __kernel_last_resume 80 static void __kernel_last_resume( processor * this ); 81 81 static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT); 82 82 static void deinit(processor & this); … … 99 99 extern void __kernel_alarm_shutdown(void); 100 100 extern void __cfa_io_start( processor * ); 101 extern void __cfa_io_stop 101 extern void __cfa_io_stop( processor * ); 102 102 103 103 //----------------------------------------------------------------------------- … … 110 110 //----------------------------------------------------------------------------- 111 111 // Kernel storage 112 KERNEL_STORAGE(cluster, 113 KERNEL_STORAGE(processor, 114 KERNEL_STORAGE(thread$, 115 KERNEL_STORAGE(__stack_t, 112 KERNEL_STORAGE(cluster, mainCluster); 113 KERNEL_STORAGE(processor, mainProcessor); 114 KERNEL_STORAGE(thread$, mainThread); 115 KERNEL_STORAGE(__stack_t, mainThreadCtx); 116 116 #if !defined(__CFA_NO_STATISTICS__) 117 117 KERNEL_STORAGE(__stats_t, mainProcStats); 118 118 #endif 119 119 120 cluster 121 processor 122 thread$ 120 cluster * mainCluster libcfa_public; 121 processor * mainProcessor; 122 thread$ * mainThread; 123 123 124 124 extern "C" { … … 150 150 // Struct to steal stack 151 151 struct current_stack_info_t { 152 __stack_t * storage; 153 void * base; 154 void * limit; 155 void * context; 152 __stack_t * storage; // pointer to stack object 153 void * base; // base of stack 154 void * limit; // stack grows towards stack limit 155 void * context; // address of cfa_context_t 156 156 }; 157 157 … … 234 234 //initialize the global state variables 235 235 __cfaabi_tls.this_processor = mainProcessor; 236 __cfaabi_tls.this_thread 236 __cfaabi_tls.this_thread = mainThread; 237 237 238 238 #if !defined( __CFA_NO_STATISTICS__ ) … … 355 355 processor * proc = (processor *) arg; 356 356 __cfaabi_tls.this_processor = proc; 357 __cfaabi_tls.this_thread 357 __cfaabi_tls.this_thread = 0p; 358 358 __cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1]; 359 359 proc->local_data = &__cfaabi_tls; … … 477 477 stack.storage = info->storage; 478 478 with(*stack.storage) { 479 limit 480 base 479 limit = info->limit; 480 base = info->base; 481 481 } 482 482 __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage; … … 485 485 state = Start; 486 486 starter = 0p; 487 last = 0p;487 this.last = 0p; 488 488 cancellation = 0p; 489 490 491 489 ehm_state.ehm_buffer{}; 490 ehm_state.buffer_lock{}; 491 ehm_state.ehm_enabled = false; 492 492 } 493 493 … … 502 502 self_mon_p = &self_mon; 503 503 rdy_link.next = 0p; 504 rdy_link.ts 504 rdy_link.ts = MAX; 505 505 user_link.next = 0p; 506 506 user_link.prev = 0p; … … 509 509 preferred = ready_queue_new_preferred(); 510 510 last_proc = 0p; 511 PRNG_SET_SEED( random_state, 511 PRNG_SET_SEED( random_state, 
__global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() ); 512 512 #if defined( __CFA_WITH_VERIFY__ ) 513 513 executing = 0p; … … 531 531 this.name = name; 532 532 this.cltr = &_cltr; 533 533 __atomic_add_fetch( &_cltr.procs.constructed, 1u, __ATOMIC_RELAXED ); 534 534 this.rdq.its = 0; 535 535 this.rdq.itr = 0; 536 this.rdq.id 536 this.rdq.id = 0; 537 537 this.rdq.target = MAX; 538 538 this.rdq.last = MAX; … … 545 545 this.io.ctx = 0p; 546 546 this.io.pending = false; 547 this.io.dirty 547 this.io.dirty = false; 548 548 549 549 this.init.thrd = initT; … … 599 599 __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this); 600 600 601 601 __atomic_sub_fetch( &this.cltr->procs.constructed, 1u, __ATOMIC_RELAXED ); 602 602 603 603 __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED); … … 619 619 // Cluster 620 620 static void ?{}(__cluster_proc_list & this) { 621 this.fdw 622 this.idle 623 621 this.fdw = 0p; 622 this.idle = 0; 623 this.constructed = 0; 624 624 this.total = 0; 625 625 } … … 706 706 //----------------------------------------------------------------------------- 707 707 // Global Queues 708 static void doregister( cluster 709 lock 708 static void doregister( cluster & cltr ) { 709 lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2); 710 710 push_front( __cfa_dbg_global_clusters.list, cltr ); 711 unlock 712 } 713 714 static void unregister( cluster 715 lock 711 unlock( __cfa_dbg_global_clusters.lock ); 712 } 713 714 static void unregister( cluster & cltr ) { 715 lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2); 716 716 remove( __cfa_dbg_global_clusters.list, cltr ); 717 717 unlock( __cfa_dbg_global_clusters.lock ); … … 719 719 720 720 void doregister( cluster * cltr, thread$ & thrd ) { 721 lock 721 lock(cltr->thread_list_lock __cfaabi_dbg_ctx2); 722 722 cltr->nthreads += 1; 723 723 insert_first(cltr->threads, thrd); 724 unlock 724 unlock(cltr->thread_list_lock); 725 725 } 726 726 727 727 void unregister( cluster * cltr, thread$ & thrd ) { 728 lock 728 lock(cltr->thread_list_lock __cfaabi_dbg_ctx2); 729 729 { 730 730 tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) = void; -
libcfa/src/concurrency/locks.cfa
rf85de47 r65bd3c2 79 79 // lock is held by some other thread 80 80 if ( owner != 0p && owner != thrd ) { 81 81 select_node node; 82 82 insert_last( blocked_threads, node ); 83 83 wait_count++; 84 84 unlock( lock ); 85 85 park( ); 86 86 return; 87 87 } else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread 88 88 recursion_count++; … … 91 91 recursion_count = 1; 92 92 } 93 93 unlock( lock ); 94 94 } 95 95 … … 115 115 116 116 static inline void pop_node( blocking_lock & this ) with( this ) { 117 118 select_node * node = &try_pop_front( blocked_threads );119 120 121 122 123 124 125 126 127 128 117 __handle_waituntil_OR( blocked_threads ); 118 select_node * node = &remove_first( blocked_threads ); 119 if ( node ) { 120 wait_count--; 121 owner = node->blocked_thread; 122 recursion_count = 1; 123 // if ( !node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread ); 124 wake_one( blocked_threads, *node ); 125 } else { 126 owner = 0p; 127 recursion_count = 0; 128 } 129 129 } 130 130 … … 160 160 unpark( t ); 161 161 } 162 162 unlock( lock ); 163 163 } 164 164 … … 172 172 pop_node( this ); 173 173 174 175 176 unlock( lock ); 177 178 174 select_node node; 175 active_thread()->link_node = (void *)&node; 176 unlock( lock ); 177 178 pre_park_then_park( pp_fn, pp_datum ); 179 179 180 180 return ret; … … 187 187 // waituntil() support 188 188 bool register_select( blocking_lock & this, select_node & node ) with(this) { 189 189 lock( lock __cfaabi_dbg_ctx2 ); 190 190 thread$ * thrd = active_thread(); 191 191 … … 193 193 /* paranoid */ verifyf( owner != thrd || multi_acquisition, "Single acquisition lock holder (%p) attempted to reacquire the lock %p resulting in a deadlock.", owner, &this ); 194 194 195 196 197 198 199 200 195 if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) { // OR special case 196 if ( !__make_select_node_available( node ) ) { // we didn't win the race so give up on registering 197 unlock( lock ); 198 return false; 199 } 200 } 201 201 202 202 // lock is held by some other thread … … 205 205 wait_count++; 206 206 unlock( lock ); 207 207 return false; 208 208 } else if ( owner == thrd && multi_acquisition ) { // multi acquisition lock is held by current thread 209 209 recursion_count++; … … 213 213 } 214 214 215 216 217 215 if ( node.park_counter ) __make_select_node_available( node ); 216 unlock( lock ); 217 return true; 218 218 } 219 219 220 220 bool unregister_select( blocking_lock & this, select_node & node ) with(this) { 221 222 if ( node`isListed) {223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 unlock( lock ); 238 221 lock( lock __cfaabi_dbg_ctx2 ); 222 if ( isListed( node ) ) { 223 remove( node ); 224 wait_count--; 225 unlock( lock ); 226 return false; 227 } 228 229 if ( owner == active_thread() ) { 230 /* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count ); 231 // if recursion count is zero release lock and set new owner if one is waiting 232 recursion_count--; 233 if ( recursion_count == 0 ) { 234 pop_node( this ); 235 } 236 } 237 unlock( lock ); 238 return false; 239 239 } 240 240 … … 265 265 // may still be called after a thread has been removed from the queue but 266 266 // before the alarm is unregistered 267 if ( (*info_thd)`isListed ) {// is thread on queue267 if ( 
isListed( *info_thd ) ) { // is thread on queue 268 268 info_thd->signalled = false; 269 269 // remove this thread O(1) 270 270 remove( *info_thd ); 271 271 cond->count--; 272 if ( info_thd->lock ) {272 if ( info_thd->lock ) { 273 273 // call lock's on_notify if a lock was passed 274 274 on_notify(*info_thd->lock, info_thd->t); … … 304 304 // may still be called after a thread has been removed from the queue but 305 305 // before the alarm is unregistered 306 if ( (*info_thd)`isListed ) {// is thread on queue306 if ( isListed( *info_thd ) ) { // is thread on queue 307 307 info_thd->signalled = false; 308 308 // remove this thread O(1) … … 332 332 333 333 static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) { 334 if (&popped != 0p) {334 if (&popped != 0p) { 335 335 popped.signalled = true; 336 336 count--; … … 347 347 bool notify_one( condition_variable(L) & this ) with( this ) { 348 348 lock( lock __cfaabi_dbg_ctx2 ); 349 bool ret = ! blocked_threads`isEmpty;350 process_popped(this, try_pop_front( blocked_threads ));349 bool ret = ! isEmpty( blocked_threads ); 350 process_popped(this, remove_first( blocked_threads )); 351 351 unlock( lock ); 352 352 return ret; … … 355 355 bool notify_all( condition_variable(L) & this ) with(this) { 356 356 lock( lock __cfaabi_dbg_ctx2 ); 357 bool ret = ! blocked_threads`isEmpty;358 while( ! blocked_threads`isEmpty) {359 process_popped(this, try_pop_front( blocked_threads ));357 bool ret = ! isEmpty( blocked_threads ); 358 while( ! isEmpty( blocked_threads ) ) { 359 process_popped(this, remove_first( blocked_threads )); 360 360 } 361 361 unlock( lock ); … … 364 364 365 365 uintptr_t front( condition_variable(L) & this ) with(this) { 366 return blocked_threads`isEmpty ? NULL : blocked_threads`first.info;366 return isEmpty( blocked_threads ) ? 
NULL : first( blocked_threads ).info; 367 367 } 368 368 369 369 bool empty( condition_variable(L) & this ) with(this) { 370 370 lock( lock __cfaabi_dbg_ctx2 ); 371 bool ret = blocked_threads`isEmpty;371 bool ret = isEmpty( blocked_threads ); 372 372 unlock( lock ); 373 373 return ret; … … 382 382 } 383 383 384 385 384 static size_t block_and_get_recursion( info_thread(L) & i, __cfa_pre_park pp_fn, void * pp_datum ) { 385 size_t recursion_count = 0; 386 386 if ( i.lock ) // if lock was passed get recursion count to reset to after waking thread 387 387 recursion_count = on_wait( *i.lock, pp_fn, pp_datum ); // this call blocks 388 388 else 389 390 391 392 389 pre_park_then_park( pp_fn, pp_datum ); 390 return recursion_count; 391 } 392 static size_t block_and_get_recursion( info_thread(L) & i ) { return block_and_get_recursion( i, pre_park_noop, 0p ); } 393 393 394 394 // helper for wait()'s' with no timeout 395 395 static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) { 396 396 lock( lock __cfaabi_dbg_ctx2 ); 397 397 enqueue_thread( this, &i ); 398 398 unlock( lock ); 399 399 400 400 // blocks here 401 401 size_t recursion_count = block_and_get_recursion( i ); 402 402 403 403 // resets recursion count here after waking … … 409 409 queue_info_thread( this, i ); 410 410 411 411 static void cond_alarm_register( void * node_ptr ) { register_self( (alarm_node_t *)node_ptr ); } 412 412 413 413 // helper for wait()'s' with a timeout 414 414 static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) { 415 415 lock( lock __cfaabi_dbg_ctx2 ); 416 416 enqueue_thread( this, &info ); 417 417 alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info }; 418 418 unlock( lock ); 419 419 420 420 // blocks here and registers alarm node before blocking after releasing locks to avoid deadlock 421 421 size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) ); 422 422 // park(); 423 423 … … 434 434 return i.signalled; 435 435 436 void wait( condition_variable(L) & this ) with(this) { WAIT( 0, 0p) }437 void wait( condition_variable(L) & this, uintptr_t info 438 void wait( condition_variable(L) & this, L & l ) with(this) { WAIT( 0, &l) }436 void wait( condition_variable(L) & this ) with(this) { WAIT( 0, 0p ) } 437 void wait( condition_variable(L) & this, uintptr_t info ) with(this) { WAIT( info, 0p ) } 438 void wait( condition_variable(L) & this, L & l ) with(this) { WAIT( 0, &l ) } 439 439 void wait( condition_variable(L) & this, L & l, uintptr_t info ) with(this) { WAIT( info, &l ) } 440 440 441 bool wait( condition_variable(L) & this, Duration duration ) with(this) { WAIT_TIME( 0, 0p , duration ) }442 bool wait( condition_variable(L) & this, uintptr_t info, Duration duration 443 bool wait( condition_variable(L) & this, L & l, Duration duration ) with(this) { WAIT_TIME( 0, &l , duration ) }441 bool wait( condition_variable(L) & this, Duration duration ) with(this) { WAIT_TIME( 0 , 0p , duration ) } 442 bool wait( condition_variable(L) & this, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, 0p , duration ) } 443 bool wait( condition_variable(L) & this, L & l, Duration duration ) with(this) { WAIT_TIME( 0 , &l , duration ) } 444 444 bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) } 445 445 446 446 
//----------------------------------------------------------------------------- 447 447 // fast_cond_var 448 void 448 void ?{}( fast_cond_var(L) & this ){ 449 449 this.blocked_threads{}; 450 450 #ifdef __CFA_DEBUG__ … … 455 455 456 456 bool notify_one( fast_cond_var(L) & this ) with(this) { 457 bool ret = ! blocked_threads`isEmpty;457 bool ret = ! isEmpty( blocked_threads ); 458 458 if ( ret ) { 459 info_thread(L) & popped = try_pop_front( blocked_threads );459 info_thread(L) & popped = remove_first( blocked_threads ); 460 460 on_notify(*popped.lock, popped.t); 461 461 } … … 463 463 } 464 464 bool notify_all( fast_cond_var(L) & this ) with(this) { 465 bool ret = ! blocked_threads`isEmpty;466 while( ! blocked_threads`isEmpty) {467 info_thread(L) & popped = try_pop_front( blocked_threads );465 bool ret = ! isEmpty( blocked_threads ); 466 while( ! isEmpty( blocked_threads ) ) { 467 info_thread(L) & popped = remove_first( blocked_threads ); 468 468 on_notify(*popped.lock, popped.t); 469 469 } … … 471 471 } 472 472 473 uintptr_t front( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }474 bool empty ( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }473 uintptr_t front( fast_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ) ? NULL : first( blocked_threads ).info; } 474 bool empty ( fast_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ); } 475 475 476 476 void wait( fast_cond_var(L) & this, L & l ) { … … 494 494 // pthread_cond_var 495 495 496 void 496 void ?{}( pthread_cond_var(L) & this ) with(this) { 497 497 blocked_threads{}; 498 498 lock{}; … … 503 503 bool notify_one( pthread_cond_var(L) & this ) with(this) { 504 504 lock( lock __cfaabi_dbg_ctx2 ); 505 bool ret = ! blocked_threads`isEmpty;505 bool ret = ! isEmpty( blocked_threads ); 506 506 if ( ret ) { 507 info_thread(L) & popped = try_pop_front( blocked_threads );507 info_thread(L) & popped = remove_first( blocked_threads ); 508 508 popped.signalled = true; 509 509 on_notify(*popped.lock, popped.t); … … 515 515 bool notify_all( pthread_cond_var(L) & this ) with(this) { 516 516 lock( lock __cfaabi_dbg_ctx2 ); 517 bool ret = ! blocked_threads`isEmpty;518 while( ! blocked_threads`isEmpty) {519 info_thread(L) & popped = try_pop_front( blocked_threads );517 bool ret = ! isEmpty( blocked_threads ); 518 while( ! isEmpty( blocked_threads ) ) { 519 info_thread(L) & popped = remove_first( blocked_threads ); 520 520 popped.signalled = true; 521 521 on_notify(*popped.lock, popped.t); … … 525 525 } 526 526 527 uintptr_t front( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }528 bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }527 uintptr_t front( pthread_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ) ? 
NULL : first( blocked_threads ).info; } 528 bool empty ( pthread_cond_var(L) & this ) with(this) { return isEmpty( blocked_threads ); } 529 529 530 530 static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) { 531 531 lock( lock __cfaabi_dbg_ctx2 ); 532 532 insert_last( blocked_threads, info ); 533 533 pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info }; 534 534 unlock( lock ); 535 535 536 536 // blocks here and registers alarm node before blocking after releasing locks to avoid deadlock 537 537 size_t recursion_count = block_and_get_recursion( info, cond_alarm_register, (void *)(&node_wrap.alarm_node) ); 538 538 539 539 // unregisters alarm so it doesn't go off if signal happens first … … 551 551 lock( lock __cfaabi_dbg_ctx2 ); 552 552 info_thread( L ) i = { active_thread(), info, &l }; 553 554 unlock( lock ); 555 556 553 insert_last( blocked_threads, i ); 554 unlock( lock ); 555 556 // blocks here 557 557 size_t recursion_count = block_and_get_recursion( i ); 558 558 … … 579 579 } 580 580 581 bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t 581 bool wait( pthread_cond_var(L) & this, L & l, uintptr_t info, timespec t ) { 582 582 PTHREAD_WAIT_TIME( info, &l , getDuration( t ) ) 583 583 } … … 585 585 //----------------------------------------------------------------------------- 586 586 // Semaphore 587 void 587 void ?{}( semaphore & this, int count = 1 ) { 588 588 (this.lock){}; 589 589 this.count = count; … … 603 603 park(); 604 604 return true; 605 } 606 else { 607 unlock( lock ); 608 return false; 605 } else { 606 unlock( lock ); 607 return false; 609 608 } 610 609 } … … 622 621 623 622 // make new owner 624 if ( doUnpark ) unpark( thrd );623 if ( doUnpark ) unpark( thrd ); 625 624 626 625 return thrd; -
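Every timed wait above funnels through block_and_get_recursion, and the alarm is registered in the pre-park step only after the condition's internal lock is released, avoiding a deadlock against the alarm handler. A user-level sketch; pairing simple_owner_lock with condition_variable is assumed to satisfy the lock trait the condition expects:

    simple_owner_lock l;
    condition_variable( simple_owner_lock ) c;
    lock( l );
    bool signalled = wait( c, l, 1`s );        // releases l, sleeps up to 1s, reacquires l
    if ( ! signalled ) { /* timed out */ }
    unlock( l );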
libcfa/src/concurrency/locks.hfa
rf85de47 r65bd3c2 11 11 // Created On : Thu Jan 21 19:46:50 2021 12 12 // Last Modified By : Peter A. Buhr 13 // Last Modified On : Tue Dec 24 09:36:52 202414 // Update Count : 1613 // Last Modified On : Fri Apr 25 07:14:16 2025 14 // Update Count : 22 15 15 // 16 16 … … 56 56 57 57 static inline void pre_park_then_park( __cfa_pre_park pp_fn, void * pp_datum ) { 58 59 58 pp_fn( pp_datum ); 59 park(); 60 60 } 61 61 … … 63 63 64 64 #define DEFAULT_ON_NOTIFY( lock_type ) \ 65 65 static inline void on_notify( lock_type & /*this*/, thread$ * t ){ unpark( t ); } 66 66 67 67 #define DEFAULT_ON_WAIT( lock_type ) \ 68 69 70 71 72 68 static inline size_t on_wait( lock_type & this, __cfa_pre_park pp_fn, void * pp_datum ) { \ 69 unlock( this ); \ 70 pre_park_then_park( pp_fn, pp_datum ); \ 71 return 0; \ 72 } 73 73 74 74 // on_wakeup impl if lock should be reacquired after waking up 75 75 #define DEFAULT_ON_WAKEUP_REACQ( lock_type ) \ 76 76 static inline void on_wakeup( lock_type & this, size_t /*recursion*/ ) { lock( this ); } 77 77 78 78 // on_wakeup impl if lock will not be reacquired after waking up 79 79 #define DEFAULT_ON_WAKEUP_NO_REACQ( lock_type ) \ 80 80 static inline void on_wakeup( lock_type & /*this*/, size_t /*recursion*/ ) {} 81 81 82 82 … … 142 142 static inline void ?{}( mcs_node & this ) { this.next = 0p; } 143 143 144 static inline mcs_node * volatile & ?`next( mcs_node * node ) {144 static inline mcs_node * volatile & next( mcs_node * node ) { 145 145 return node->next; 146 146 } … … 156 156 157 157 static inline void unlock( mcs_lock & l, mcs_node & n ) { 158 mcs_node * n ext = advance( l.queue, &n );159 if ( n ext ) post( next->sem );158 mcs_node * nxt = advance( l.queue, &n ); 159 if ( nxt ) post( nxt->sem ); 160 160 } 161 161 … … 181 181 182 182 static inline void lock( mcs_spin_lock & l, mcs_spin_node & n ) { 183 183 n.locked = true; 184 184 185 185 #if defined( __ARM_ARCH ) … … 187 187 #endif 188 188 189 mcs_spin_node * prev = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST );190 if ( prev == 0p ) return;191 prev ->next = &n;189 mcs_spin_node * prev_val = __atomic_exchange_n( &l.queue.tail, &n, __ATOMIC_SEQ_CST ); 190 if ( prev_val == 0p ) return; 191 prev_val->next = &n; 192 192 193 193 #if defined( __ARM_ARCH ) … … 234 234 // to use for FUTEX_WAKE and FUTEX_WAIT (other futex calls will need more params) 235 235 static inline int futex( int *uaddr, int futex_op, int val ) { 236 236 return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 ); 237 237 } 238 238 … … 271 271 static inline void unlock( futex_mutex & this ) with( this ) { 272 272 // if uncontended do atomic unlock and then return 273 273 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return; 274 274 275 275 // otherwise threads are blocked so we must wake one … … 311 311 int state, init_state; 312 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 313 // speculative grab 314 state = internal_exchange( this, 1 ); 315 if ( ! state ) return; // state == 0 316 init_state = state; 317 for () { 318 for ( 4 ) { 319 while ( ! val ) { // lock unlocked 320 state = 0; 321 if ( internal_try_lock( this, state, init_state ) ) return; 322 } 323 for ( 30 ) Pause(); 324 } 325 326 while ( ! val ) { // lock unlocked 327 state = 0; 328 if ( internal_try_lock( this, state, init_state ) ) return; 329 } 330 sched_yield(); 331 332 // if not in contended state, set to be in contended state 333 state = internal_exchange( this, 2 ); 334 if ( ! 
state ) return; // state == 0 335 init_state = 2; 336 futex( (int*)&val, FUTEX_WAIT, 2 ); // if val is not 2 this returns with EWOULDBLOCK 337 } 338 338 } 339 339 340 340 static inline void unlock( go_mutex & this ) with( this ) { 341 341 // if uncontended do atomic unlock and then return 342 342 if ( __atomic_exchange_n( &val, 0, __ATOMIC_RELEASE ) == 1 ) return; 343 343 344 344 // otherwise threads are blocked so we must wake one … … 384 384 385 385 static inline bool block( exp_backoff_then_block_lock & this ) with( this ) { 386 387 388 389 390 391 392 386 lock( spinlock __cfaabi_dbg_ctx2 ); 387 if ( __atomic_load_n( &lock_value, __ATOMIC_SEQ_CST ) != 2 ) { 388 unlock( spinlock ); 389 return true; 390 } 391 insert_last( blocked_threads, *active_thread() ); 392 unlock( spinlock ); 393 393 park( ); 394 394 return true; … … 415 415 416 416 static inline void unlock( exp_backoff_then_block_lock & this ) with( this ) { 417 418 419 thread$ * t = &try_pop_front( blocked_threads );420 421 417 if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return; 418 lock( spinlock __cfaabi_dbg_ctx2 ); 419 thread$ * t = &remove_first( blocked_threads ); 420 unlock( spinlock ); 421 unpark( t ); 422 422 } 423 423 … … 469 469 lock( lock __cfaabi_dbg_ctx2 ); 470 470 /* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this ); 471 thread$ * t = & try_pop_front( blocked_threads );471 thread$ * t = &remove_first( blocked_threads ); 472 472 held = ( t ? true : false ); 473 473 unpark( t ); … … 476 476 477 477 static inline void on_notify( fast_block_lock & this, struct thread$ * t ) with( this ) { 478 479 480 478 lock( lock __cfaabi_dbg_ctx2 ); 479 insert_last( blocked_threads, *t ); 480 unlock( lock ); 481 481 } 482 482 DEFAULT_ON_WAIT( fast_block_lock ) … … 521 521 522 522 if ( owner != 0p ) { 523 523 select_node node; 524 524 insert_last( blocked_threads, node ); 525 525 unlock( lock ); … … 533 533 534 534 static inline void pop_node( simple_owner_lock & this ) with( this ) { 535 536 select_node * node = &try_pop_front( blocked_threads );537 538 539 540 541 542 543 544 545 535 __handle_waituntil_OR( blocked_threads ); 536 select_node * node = &remove_first( blocked_threads ); 537 if ( node ) { 538 owner = node->blocked_thread; 539 recursion_count = 1; 540 // if ( ! node->clause_status || __make_select_node_available( *node ) ) unpark( node->blocked_thread ); 541 wake_one( blocked_threads, *node ); 542 } else { 543 owner = 0p; 544 recursion_count = 0; 545 } 546 546 } 547 547 … … 582 582 pop_node( this ); 583 583 584 585 586 unlock( lock ); 587 588 584 select_node node; 585 active_thread()->link_node = (void *)&node; 586 unlock( lock ); 587 588 pre_park_then_park( pp_fn, pp_datum ); 589 589 590 590 return ret; … … 595 595 // waituntil() support 596 596 static inline bool register_select( simple_owner_lock & this, select_node & node ) with( this ) { 597 598 599 600 601 602 603 604 605 606 607 597 lock( lock __cfaabi_dbg_ctx2 ); 598 599 // check if we can complete operation. If so race to establish winner in special OR case 600 if ( ! node.park_counter && ( owner == active_thread() || owner == 0p ) ) { 601 if ( ! 
__make_select_node_available( node ) ) { // we didn't win the race so give up on registering 602 unlock( lock ); 603 return false; 604 } 605 } 606 607 if ( owner == active_thread() ) { 608 608 recursion_count++; 609 610 609 if ( node.park_counter ) __make_select_node_available( node ); 610 unlock( lock ); 611 611 return true; 612 612 } 613 613 614 614 if ( owner != 0p ) { 615 615 insert_last( blocked_threads, node ); 616 616 unlock( lock ); 617 617 return false; 618 618 } 619 619 620 620 owner = active_thread(); 621 621 recursion_count = 1; 622 622 623 624 625 623 if ( node.park_counter ) __make_select_node_available( node ); 624 unlock( lock ); 625 return true; 626 626 } 627 627 628 628 static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with( this ) { 629 630 if ( node`isListed) {631 632 633 634 635 636 637 638 639 640 641 642 643 629 lock( lock __cfaabi_dbg_ctx2 ); 630 if ( isListed( node ) ) { 631 remove( node ); 632 unlock( lock ); 633 return false; 634 } 635 636 if ( owner == active_thread() ) { 637 recursion_count--; 638 if ( recursion_count == 0 ) { 639 pop_node( this ); 640 } 641 } 642 unlock( lock ); 643 return false; 644 644 } 645 645 -
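Note on the lock paths above: go_mutex is the classic three-state futex mutex (0 = free, 1 = locked, 2 = contended) with a Go-style twist, spinning in short bursts and calling sched_yield() before finally sleeping in state 2. A minimal standalone C sketch of the underlying protocol, without the library's bounded-spin tuning; my_lock/my_unlock are illustrative names, not libcfa API:

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    // Sketch only: three-state futex mutex (0 free, 1 locked, 2 contended).
    static inline int futex( int * uaddr, int futex_op, int val ) {
        return syscall( SYS_futex, uaddr, futex_op, val, NULL, NULL, 0 );
    }

    static void my_lock( int * val ) {
        if ( __atomic_exchange_n( val, 1, __ATOMIC_ACQUIRE ) == 0 ) return; // speculative grab
        while ( __atomic_exchange_n( val, 2, __ATOMIC_ACQUIRE ) != 0 )      // advertise contention
            futex( val, FUTEX_WAIT, 2 );  // sleeps only while *val is still 2, else EWOULDBLOCK
    }

    static void my_unlock( int * val ) {
        if ( __atomic_exchange_n( val, 0, __ATOMIC_RELEASE ) != 1 )         // waiters possible
            futex( val, FUTEX_WAKE, 1 );  // wake one blocked locker
    }

A woken thread re-enters through the contended path and re-advertises state 2 before any further sleep, which is what keeps the simple exchange-based fast path safe.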
libcfa/src/concurrency/monitor.cfa
rf85de47 r65bd3c2 9 9 // Author : Thierry Delisle 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 // Last Modified By : Kyoung Seo12 // Last Modified On : Thd Jan 16 12:59:00202513 // Update Count : 7311 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 25 07:20:22 2025 13 // Update Count : 80 14 14 // 15 15 … … 78 78 __spinlock_t * locks[count]; /* We need to pass-in an array of locks to BlockInternal */ 79 79 80 #define monitor_save save( monitors, count, locks, recursions, masks )80 #define monitor_save save ( monitors, count, locks, recursions, masks ) 81 81 #define monitor_restore restore( monitors, count, locks, recursions, masks ) 82 82 … … 95 95 if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) { 96 96 abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this ); 97 } else if ( ! this->owner ) {97 } else if ( ! this->owner ) { 98 98 // No one has the monitor, just take it 99 99 __set_owner( this, thrd ); 100 100 101 __cfaabi_dbg_print_safe( "Kernel : 101 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 102 102 } else if ( this->owner == thrd) { 103 103 // We already have the monitor, just note how many times we took it 104 104 this->recursion += 1; 105 105 106 __cfaabi_dbg_print_safe( "Kernel : 106 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 107 107 } else if ( is_accepted( this, group) ) { 108 108 // Some one was waiting for us, enter … … 112 112 reset_mask( this ); 113 113 114 __cfaabi_dbg_print_safe( "Kernel : 114 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 115 115 } else { 116 __cfaabi_dbg_print_safe( "Kernel : 116 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 117 117 118 118 // Some one else has the monitor, wait in line for it … … 124 124 park(); 125 125 126 __cfaabi_dbg_print_safe( "Kernel : %10p Entered 126 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 127 127 128 128 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); … … 130 130 } 131 131 132 __cfaabi_dbg_print_safe( "Kernel : %10p Entered 132 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 133 133 134 134 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); … … 152 152 153 153 154 if ( ! this->owner ) {154 if ( ! this->owner ) { 155 155 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 156 156 … … 159 159 160 160 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 161 /* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled );161 /* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled ); 162 162 163 163 unlock( this->lock ); 164 164 return; 165 } else if ( this->owner == thrd && ! join) {165 } else if ( this->owner == thrd && ! join) { 166 166 // We already have the monitor... but where about to destroy it so the nesting will fail 167 167 // Abort! … … 179 179 180 180 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 181 /* paranoid */ verify( ! is_thrd || thrd->state == Halted || thrd->state == Cancelled );181 /* paranoid */ verify( ! 
is_thrd || thrd->state == Halted || thrd->state == Cancelled ); 182 182 183 183 unlock( this->lock ); … … 186 186 187 187 // The monitor is busy, if this is a thread and the thread owns itself, it better be active 188 /* paranoid */ verify( ! is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) );188 /* paranoid */ verify( ! is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) ); 189 189 190 190 __lock_size_t count = 1; … … 192 192 __monitor_group_t group = { &this, 1, func }; 193 193 if ( is_accepted( this, group) ) { 194 __cfaabi_dbg_print_safe( "Kernel : 194 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 195 195 196 196 // Wake the thread that is waiting for this … … 220 220 return; 221 221 } else { 222 __cfaabi_dbg_print_safe( "Kernel : 222 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 223 223 224 224 wait_ctx( thrd, 0 ) … … 254 254 // it means we don't need to do anything 255 255 if ( this->recursion != 0) { 256 __cfaabi_dbg_print_safe( "Kernel : 256 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 257 257 unlock( this->lock ); 258 258 return; … … 264 264 // Check the new owner is consistent with who we wake-up 265 265 // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor 266 /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );266 /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 267 267 268 268 // We can now let other threads in safely … … 270 270 271 271 //We need to wake-up the thread 272 /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );272 /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 273 273 unpark( new_owner ); 274 274 } … … 280 280 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, active_thread(), this->owner); 281 281 } 282 if ( this->recursion != 1 && !join ) {282 if ( this->recursion != 1 && ! join ) { 283 283 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 284 284 } … … 317 317 318 318 // Unpark the next owner if needed 319 /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );319 /* paranoid */ verifyf( ! new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 320 320 /* paranoid */ verify( ! __preemption_enabled() ); 321 321 /* paranoid */ verify( thrd->state == Halted ); … … 424 424 425 425 static void ?{}(__condition_criterion_t & this ) with( this ) { 426 ready 426 ready = false; 427 427 target = 0p; 428 owner 429 next= 0p;428 owner = 0p; 429 this.next = 0p; 430 430 } 431 431 432 432 static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) { 433 this.ready 433 this.ready = false; 434 434 this.target = target; 435 this.owner 436 this.next 435 this.owner = &owner; 436 this.next = 0p; 437 437 } 438 438 … … 525 525 for ( i; count ) { 526 526 __condition_criterion_t * crit = &node->criteria[i]; 527 assert( ! crit->ready );527 assert( ! 
crit->ready ); 528 528 push( crit->target->signal_stack, crit ); 529 529 } … … 536 536 537 537 bool signal_block( condition & this ) libcfa_public { 538 if ( ! this.blocked.head ) { return false; }538 if ( ! this.blocked.head ) { return false; } 539 539 540 540 //Check that everything is as expected … … 571 571 // WE WOKE UP 572 572 573 __cfaabi_dbg_print_buffer_local( "Kernel : 573 __cfaabi_dbg_print_buffer_local( "Kernel : signal_block returned\n" ); 574 574 575 575 //We are back, restore the masks and recursions … … 581 581 // Access the user_info of the thread waiting at the front of the queue 582 582 uintptr_t front( condition & this ) libcfa_public { 583 verifyf( ! is_empty(this),583 verifyf( ! is_empty(this), 584 584 "Attempt to access user data on an empty condition.\n" 585 585 "Possible cause is not checking if the condition is empty before reading stored data." … … 624 624 { 625 625 // Check if the entry queue 626 thread$ * n ext; int index;627 [n ext, index] = search_entry_queue( mask, monitors, count );628 629 if ( n ext ) {626 thread$ * nxt; int index; 627 [nxt, index] = search_entry_queue( mask, monitors, count ); 628 629 if ( nxt ) { 630 630 *mask.accepted = index; 631 631 __acceptable_t& accepted = mask[index]; 632 632 if ( accepted.is_dtor ) { 633 633 __cfaabi_dbg_print_buffer_local( "Kernel : dtor already there\n" ); 634 verifyf( accepted.size == 1, 634 verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." ); 635 635 636 636 monitor$ * mon2dtor = accepted[0]; … … 651 651 monitor_save; 652 652 653 __cfaabi_dbg_print_buffer_local( "Kernel : 653 __cfaabi_dbg_print_buffer_local( "Kernel : baton of %"PRIdFAST16" monitors : ", count ); 654 654 #ifdef __CFA_DEBUG_PRINT__ 655 655 for ( i; count ) { … … 660 660 661 661 // Set the owners to be the next thread 662 __set_owner( monitors, count, n ext );662 __set_owner( monitors, count, nxt ); 663 663 664 664 // unlock all the monitors … … 666 666 667 667 // unpark the thread we signalled 668 unpark( n ext );668 unpark( nxt ); 669 669 670 670 //Everything is ready to go to sleep … … 741 741 /* paranoid */ verify ( monitors[0]->lock.lock ); 742 742 /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] ); 743 monitors[0]->owner 744 monitors[0]->recursion 743 monitors[0]->owner = owner; 744 monitors[0]->recursion = 1; 745 745 for ( i; 1~count ) { 746 746 /* paranoid */ verify ( monitors[i]->lock.lock ); 747 747 /* paranoid */ verifyf( monitors[i]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[i]->owner, monitors[i]->recursion, monitors[i] ); 748 monitors[i]->owner 749 monitors[i]->recursion 748 monitors[i]->owner = owner; 749 monitors[i]->recursion = 0; 750 750 } 751 751 } … … 765 765 static inline thread$ * next_thread( monitor$ * this ) { 766 766 //Check the signaller stack 767 __cfaabi_dbg_print_safe( "Kernel : 767 __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top); 768 768 __condition_criterion_t * urgent = pop( this->signal_stack ); 769 769 if ( urgent ) { … … 771 771 //regardless of if we are ready to baton pass, 772 772 //we need to set the monitor as in use 773 /* paranoid */ verifyf( ! 
this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );774 __set_owner( this, 773 /* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 774 __set_owner( this, urgent->owner->waiting_thread ); 775 775 776 776 return check_condition( urgent ); … … 780 780 // Get the next thread in the entry_queue 781 781 thread$ * new_owner = pop_head( this->entry_queue ); 782 /* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );783 /* paranoid */ verify( ! new_owner || new_owner->user_link.next == 0p );782 /* paranoid */ verifyf( ! this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 783 /* paranoid */ verify( ! new_owner || new_owner->user_link.next == 0p ); 784 784 __set_owner( this, new_owner ); 785 785 … … 792 792 793 793 // Check if there are any acceptable functions 794 if ( ! it ) return false;794 if ( ! it ) return false; 795 795 796 796 // If this isn't the first monitor to test this, there is no reason to repeat the test. … … 820 820 for ( i; count ) { 821 821 (criteria[i]){ monitors[i], waiter }; 822 __cfaabi_dbg_print_safe( "Kernel : 822 __cfaabi_dbg_print_safe( "Kernel : target %p = %p\n", criteria[i].target, &criteria[i] ); 823 823 push( criteria[i].target->signal_stack, &criteria[i] ); 824 824 } … … 902 902 } 903 903 904 __cfaabi_dbg_print_safe( "Kernel : 904 __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? (thread*)node->waiting_thread : (thread*)0p ); 905 905 return ready2run ? node->waiting_thread : 0p; 906 906 } … … 908 908 static inline void brand_condition( condition & this ) { 909 909 thread$ * thrd = active_thread(); 910 if ( ! this.monitors ) {910 if ( ! this.monitors ) { 911 911 // __cfaabi_dbg_print_safe( "Branding\n" ); 912 912 assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data ); … … 928 928 for ( __acceptable_t * it = begin; it != end; it++, i++ ) { 929 929 #if defined( __CFA_WITH_VERIFY__ ) 930 thread$ * last= 0p;930 thread$ * prior = 0p; 931 931 #endif // __CFA_WITH_VERIFY__ 932 932 … … 934 934 thread$ * curr = *thrd_it; 935 935 936 /* paranoid */ verifyf( ! last || last->user_link.next == curr, "search not making progress, from %p (%p) to %p",937 last, last->user_link.next, curr );938 /* paranoid */ verifyf( curr != last, "search not making progress, from %p to %p", last, curr );936 /* paranoid */ verifyf( ! prior || prior->user_link.next == curr, "search not making progress, from %p (%p) to %p", 937 prior, prior->user_link.next, curr ); 938 /* paranoid */ verifyf( curr != prior, "search not making progress, from %p to %p", prior, curr ); 939 939 940 940 // For each thread in the entry-queue check for a match … … 945 945 946 946 #if defined( __CFA_WITH_VERIFY__ ) 947 last= curr;947 prior = curr; 948 948 #endif 949 949 } // for … … 1001 1001 if ( unlikely(0 != (0x1 & (uintptr_t)this->owner)) ) { 1002 1002 abort( "Attempt by thread \"%.256s\" (%p) to access joined monitor %p.", thrd->self_cor.name, thrd, this ); 1003 } else if ( ! this->owner ) {1003 } else if ( ! 
this->owner ) { 1004 1004 // No one has the monitor, just take it 1005 1005 __set_owner( this, thrd ); 1006 1006 1007 __cfaabi_dbg_print_safe( "Kernel : 1007 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 1008 1008 } else if ( this->owner == thrd) { 1009 1009 // We already have the monitor, just note how many times we took it 1010 1010 this->recursion += 1; 1011 1011 1012 __cfaabi_dbg_print_safe( "Kernel : 1012 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 1013 1013 } else { 1014 __cfaabi_dbg_print_safe( "Kernel : 1014 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 1015 1015 1016 1016 // Some one else has the monitor, wait in line for it … … 1022 1022 park(); 1023 1023 1024 __cfaabi_dbg_print_safe( "Kernel : %10p Entered 1024 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 1025 1025 1026 1026 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); … … 1028 1028 } 1029 1029 1030 __cfaabi_dbg_print_safe( "Kernel : %10p Entered 1030 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 1031 1031 1032 1032 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); -
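For orientation, the entry, exit, and baton-pass machinery above all sits behind CFA's mutex-parameter surface. A minimal sketch of that user-level view, assuming the standard monitor.hfa condition API; buffer, put, and take are illustrative names, and full is assumed to start false:

    #include <monitor.hfa>

    monitor buffer { condition avail; int item; bool full; };

    void put( buffer & mutex b, int v ) with( b ) {  // enters via the protocol above
        item = v; full = true;
        signal_block( avail );   // baton pass: a signalled waiter runs before put resumes
    }

    int take( buffer & mutex b ) with( b ) {
        if ( ! full ) wait( avail );  // releases the monitor while blocked
        full = false;
        return item;
    }

signal_block is the operation whose internals appear above: the signaller blocks and the signalled waiter takes the monitor immediately, rather than competing with the entry queue.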
libcfa/src/concurrency/preemption.cfa
rf85de47 r65bd3c2 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Jan 9 08:42:59 202313 // Update Count : 6 012 // Last Modified On : Fri Apr 25 07:24:39 2025 13 // Update Count : 63 14 14 // 15 15 … … 39 39 __attribute__((weak)) Duration default_preemption() libcfa_public { 40 40 const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION"); 41 if (!preempt_rate_s) {41 if ( !preempt_rate_s) { 42 42 __cfadbg_print_safe(preemption, "No CFA_DEFAULT_PREEMPTION in ENV\n"); 43 43 return __CFA_DEFAULT_PREEMPTION__; … … 46 46 char * endptr = 0p; 47 47 long int preempt_rate_l = strtol(preempt_rate_s, &endptr, 10); 48 if (preempt_rate_l < 0 || preempt_rate_l > 65535) {48 if (preempt_rate_l < 0 || preempt_rate_l > 65535) { 49 49 __cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION out of range : %ld\n", preempt_rate_l); 50 50 return __CFA_DEFAULT_PREEMPTION__; 51 51 } 52 if ('\0' != *endptr) {52 if ('\0' != *endptr) { 53 53 __cfadbg_print_safe(preemption, "CFA_DEFAULT_PREEMPTION not a decimal number : %s\n", preempt_rate_s); 54 54 return __CFA_DEFAULT_PREEMPTION__; … … 64 64 // FwdDeclarations : Signal handlers 65 65 static void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ); 66 static void sigHandler_alarm 67 static void sigHandler_segv 68 static void sigHandler_ill 69 static void sigHandler_fpe 70 static void sigHandler_abort 66 static void sigHandler_alarm( __CFA_SIGPARMS__ ); 67 static void sigHandler_segv( __CFA_SIGPARMS__ ); 68 static void sigHandler_ill( __CFA_SIGPARMS__ ); 69 static void sigHandler_fpe( __CFA_SIGPARMS__ ); 70 static void sigHandler_abort( __CFA_SIGPARMS__ ); 71 71 72 72 // FwdDeclarations : alarm thread main … … 86 86 #endif 87 87 88 KERNEL_STORAGE(event_kernel_t, event_kernel); 89 event_kernel_t * event_kernel; 90 static pthread_t alarm_thread; 91 static void * alarm_stack; 88 KERNEL_STORAGE(event_kernel_t, event_kernel); // private storage for event kernel 89 event_kernel_t * event_kernel; // kernel public handle to even kernel 90 static pthread_t alarm_thread; // pthread handle to alarm thread 91 static void * alarm_stack; // pthread stack for alarm thread 92 92 93 93 static void ?{}(event_kernel_t & this) with( this ) { … … 102 102 // Get next expired node 103 103 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) { 104 if ( ! & (*alarms)`first ) return 0p;// If no alarms return null105 if ( (*alarms)`first.deadline >= currtime ) return 0p;// If alarms head not expired return null106 return pop(alarms); // Otherwise just pop head104 if ( ! 
& first( *alarms ) ) return 0p; // If no alarms return null 105 if ( first( *alarms ).deadline >= currtime ) return 0p; // If alarms head not expired return null 106 return pop(alarms); // Otherwise just pop head 107 107 } 108 108 … … 117 117 __cfadbg_print_buffer_decl( preemption, " KERNEL: preemption tick %lu\n", currtime.tn); 118 118 Duration period = node->period; 119 if ( period == 0 ) {120 node->set = false; 119 if ( period == 0 ) { 120 node->set = false; // Node is one-shot, just mark it as not pending 121 121 } 122 122 … … 125 125 126 126 // Check if this is a kernel 127 if ( node->type == Kernel ) {127 if ( node->type == Kernel ) { 128 128 preempt( node->proc ); 129 129 } 130 else if ( node->type == User ) {130 else if ( node->type == User ) { 131 131 __cfadbg_print_buffer_local( preemption, " KERNEL: alarm unparking %p.\n", node->thrd ); 132 132 timeout( node->thrd ); … … 137 137 138 138 // Check if this is a periodic alarm 139 if ( period > 0 ) {139 if ( period > 0 ) { 140 140 __cfadbg_print_buffer_local( preemption, " KERNEL: alarm period is %lu.\n", period`ns ); 141 141 node->deadline = currtime + period; // Alarm is periodic, add currtime to it (used cached current time) 142 insert( alarms, node ); 142 insert( alarms, node ); // Reinsert the node for the next time it triggers 143 143 } 144 144 } 145 145 146 146 // If there are still alarms pending, reset the timer 147 if ( & (*alarms)`first) {148 Duration delta = (*alarms)`first.deadline - currtime;147 if ( & first( *alarms ) ) { 148 Duration delta = first( *alarms ).deadline - currtime; 149 149 __kernel_set_timer( delta ); 150 150 } … … 283 283 __attribute__((unused)) unsigned short new_val = disable_count + 1; 284 284 disable_count = new_val; 285 verify( new_val < 65_000u ); 285 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 286 286 } 287 287 … … 301 301 302 302 // Check if we need to prempt the thread because an interrupt was missed 303 if ( prev == 1 ) {303 if ( prev == 1 ) { 304 304 #if GCC_VERSION > 50000 305 305 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); … … 313 313 // Signal the compiler that a fence is needed but only for signal handlers 314 314 __atomic_signal_fence(__ATOMIC_RELEASE); 315 if ( poll && proc->pending_preemption ) {315 if ( poll && proc->pending_preemption ) { 316 316 proc->pending_preemption = false; 317 317 force_yield( __POLL_PREEMPTION ); … … 334 334 // Signal the compiler that a fence is needed but only for signal handlers 335 335 __atomic_signal_fence(__ATOMIC_RELEASE); 336 if ( unlikely( proc->pending_preemption ) ) {336 if ( unlikely( proc->pending_preemption ) ) { 337 337 proc->pending_preemption = false; 338 338 force_yield( __POLL_PREEMPTION ); … … 347 347 void __cfaabi_check_preemption() libcfa_public { 348 348 bool ready = __preemption_enabled(); 349 if (!ready) { abort("Preemption should be ready"); }349 if ( !ready) { abort("Preemption should be ready"); } 350 350 351 351 sigset_t oldset; 352 352 int ret; 353 353 ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 354 if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }354 if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 355 355 356 356 ret = sigismember(&oldset, SIGUSR1); 357 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }358 if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }357 if (ret < 0) { abort("ERROR sigismember returned 
%d", ret); } 358 if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 359 359 360 360 ret = sigismember(&oldset, SIGALRM); 361 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }362 if (ret == 0) { abort("ERROR SIGALRM is enabled"); }361 if (ret < 0) { abort("ERROR sigismember returned %d", ret); } 362 if (ret == 0) { abort("ERROR SIGALRM is enabled"); } 363 363 364 364 ret = sigismember(&oldset, SIGTERM); 365 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }366 if (ret == 1) { abort("ERROR SIGTERM is disabled"); }365 if (ret < 0) { abort("ERROR sigismember returned %d", ret); } 366 if (ret == 1) { abort("ERROR SIGTERM is disabled"); } 367 367 } 368 368 … … 385 385 386 386 if ( __cfaabi_pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) { 387 387 abort( "internal error, pthread_sigmask" ); 388 388 } 389 389 } … … 415 415 int ret; 416 416 ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 417 if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }417 if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 418 418 419 419 ret = sigismember(&oldset, SIGUSR1); 420 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }421 if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }420 if (ret < 0) { abort("ERROR sigismember returned %d", ret); } 421 if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 422 422 423 423 ret = sigismember(&oldset, SIGALRM); 424 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }425 if (ret == 0) { abort("ERROR SIGALRM is enabled"); }424 if (ret < 0) { abort("ERROR sigismember returned %d", ret); } 425 if (ret == 0) { abort("ERROR SIGALRM is enabled"); } 426 426 427 427 signal_block( SIGUSR1 ); … … 434 434 int ret; 435 435 ret = __cfaabi_pthread_sigmask(0, ( const sigset_t * ) 0p, &oldset); // workaround trac#208: cast should be unnecessary 436 if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); }436 if (ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 437 437 438 438 ret = sigismember(&oldset, SIGUSR1); 439 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }440 if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); }439 if (ret < 0) { abort("ERROR sigismember returned %d", ret); } 440 if (ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 441 441 442 442 ret = sigismember(&oldset, SIGALRM); 443 if (ret < 0) { abort("ERROR sigismember returned %d", ret); }444 if (ret == 0) { abort("ERROR SIGALRM is enabled"); }443 if (ret < 0) { abort("ERROR sigismember returned %d", ret); } 444 if (ret == 0) { abort("ERROR SIGALRM is enabled"); } 445 445 } 446 446 … … 453 453 // Check if preemption is safe 454 454 bool ready = true; 455 if ( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; };456 if ( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; };457 458 if ( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; };459 if ( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; };455 if ( __cfaabi_in( ip, __libcfa_nopreempt ) ) { ready = false; goto EXIT; }; 456 if ( __cfaabi_in( ip, __libcfathrd_nopreempt ) ) { ready = false; goto EXIT; }; 457 458 if ( !__cfaabi_tls.preemption_state.enabled) { ready = false; goto EXIT; }; 459 if ( __cfaabi_tls.preemption_state.in_progress ) { ready = false; goto EXIT; }; 460 460 461 461 EXIT: … … 484 484 // Setup proper signal handlers 485 485 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); // __cfactx_switch handler 
486 __cfaabi_sigaction( SIGALRM, sigHandler_alarm 486 __cfaabi_sigaction( SIGALRM, sigHandler_alarm , SA_SIGINFO ); // debug handler 487 487 488 488 signal_block( SIGALRM ); … … 551 551 // before the kernel thread has even started running. When that happens, an interrupt 552 552 // with a null 'this_processor' will be caught, just ignore it. 553 if (! __cfaabi_tls.this_processor ) return;553 if ( ! __cfaabi_tls.this_processor ) return; 554 554 555 555 choose(sfp->si_value.sival_int) { 556 case PREEMPT_NORMAL : ;// Normal case, nothing to do here557 case PREEMPT_IO : ;// I/O asked to stop spinning, nothing to do here556 case PREEMPT_NORMAL: ; // Normal case, nothing to do here 557 case PREEMPT_IO: ; // I/O asked to stop spinning, nothing to do here 558 558 case PREEMPT_TERMINATE: verify( __atomic_load_n( &__cfaabi_tls.this_processor->do_terminate, __ATOMIC_SEQ_CST ) ); 559 559 default: … … 562 562 563 563 // Check if it is safe to preempt here 564 if ( !preemption_ready( ip ) ) {564 if ( !preemption_ready( ip ) ) { 565 565 #if !defined(__CFA_NO_STATISTICS__) 566 566 __cfaabi_tls.this_stats->ready.threads.preempt.rllfwd++; … … 607 607 sigfillset(&mask); 608 608 if ( __cfaabi_pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 609 609 abort( "internal error, pthread_sigmask" ); 610 610 } 611 611 … … 622 622 __cfadbg_print_buffer_local( preemption, " KERNEL: SI_QUEUE %d, SI_TIMER %d, SI_KERNEL %d\n", SI_QUEUE, SI_TIMER, SI_KERNEL ); 623 623 624 if ( sig < 0 ) {624 if ( sig < 0 ) { 625 625 //Error! 626 626 int err = errno; -
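The reformatted sanity checks encode the mask discipline: on a user-level thread SIGUSR1 (the context-switch interrupt) must be unblocked while SIGALRM stays blocked, because SIGALRM is consumed only by the dedicated alarm thread. The same invariant can be probed from plain POSIX C; a query-only sketch:

    #include <signal.h>
    #include <pthread.h>
    #include <stdio.h>

    // Report whether sig is blocked in the calling thread's signal mask.
    static int is_blocked( int sig ) {
        sigset_t cur;
        pthread_sigmask( SIG_BLOCK, NULL, &cur );  // set == NULL: read the mask, change nothing
        return sigismember( &cur, sig );
    }

    int main( void ) {
        printf( "SIGUSR1 blocked: %d\n", is_blocked( SIGUSR1 ) );  // expect 0 on a libcfa thread
        printf( "SIGALRM blocked: %d\n", is_blocked( SIGALRM ) );  // expect 1 on a libcfa thread
        return 0;
    }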
libcfa/src/concurrency/pthread.cfa
rf85de47 r65bd3c2 9 9 // Author : Zhenyan Zhu 10 10 // Created On : Sat Aug 6 16:29:18 2022 11 // Last Modified By : Kyoung Seo12 // Last Modified On : Mon Jan 27 20:35:00202513 // Update Count : 111 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Apr 25 07:28:01 2025 13 // Update Count : 4 14 14 // 15 15 … … 40 40 bool in_use; 41 41 void (* destructor)( void * ); 42 42 dlist( pthread_values ) threads; 43 43 }; 44 44 … … 543 543 // p.in_use = false; 544 544 // } 545 pthread_values * p = &try_pop_front( cfa_pthread_keys[key].threads ); 546 for ( ; p; ) { 547 p->in_use = false; 548 p = &try_pop_front( cfa_pthread_keys[key].threads ); 549 } 545 for ( pthread_values * p = &remove_first( cfa_pthread_keys[key].threads ); p; p = &remove_first( cfa_pthread_keys[key].threads ) ) { 546 p->in_use = false; 547 } 550 548 unlock(key_lock); 551 549 return 0; … … 603 601 //######################### Parallelism ######################### 604 602 void pthread_delete_kernel_threads_() __THROW { // see uMain::~uMain 605 Pthread_kernel_threads * p = &try_pop_front(cfa_pthreads_kernel_threads); 606 for ( ; p; ) { 607 delete(p); 608 p = &try_pop_front(cfa_pthreads_kernel_threads); 603 604 for ( Pthread_kernel_threads * p = &remove_first(cfa_pthreads_kernel_threads); p; p = &remove_first(cfa_pthreads_kernel_threads) ) { 605 delete(p); 609 606 } // for 610 607 } // pthread_delete_kernel_threads_ … … 626 623 } // for 627 624 for ( ; new_level < cfa_pthreads_no_kernel_threads; cfa_pthreads_no_kernel_threads -= 1 ) { // remove processors ? 628 delete(& try_pop_front(cfa_pthreads_kernel_threads));625 delete(&remove_first(cfa_pthreads_kernel_threads)); 629 626 } // for 630 627 unlock( concurrency_lock ); -
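Both cleanup loops now share a single drain idiom: call remove_first until the returned reference is null, handling each element after it is already unlinked. The shape in isolation, with a hypothetical element type (job is not a libcfa name):

    #include <list.hfa>
    #include <fstream.hfa>

    struct job { int id; inline dlink(job); };
    P9_EMBEDDED( job, dlink(job) )

    void drain( dlist(job) & pending ) {
        for ( job * j = &remove_first( pending ); j; j = &remove_first( pending ) ) {
            sout | "drained" | j->id;  // j is unlinked here: safe to recycle or delete
        }
    }

Compared with the previous try_pop_front version, the loop header now states the whole protocol (fetch, test, refetch), so the body can never run on a null element.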
libcfa/src/concurrency/select.hfa
rf85de47 r65bd3c2 10 10 // Author : Colby Alexander Parsons 11 11 // Created On : Thu Jan 21 19:46:50 2023 12 // Last Modified By : Kyoung Seo13 // Last Modified On : Wed Mar 19 12:00:00202514 // Update Count : 112 // Last Modified By : Peter A. Buhr 13 // Last Modified On : Fri Apr 25 07:31:26 2025 14 // Update Count : 5 15 15 // 16 16 … … 33 33 static inline bool __CFA_has_clause_run( unsigned long int status ) { return status == __SELECT_RUN; } 34 34 static inline void __CFA_maybe_park( int * park_counter ) { 35 36 35 if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 ) 36 park(); 37 37 } 38 38 39 39 // node used for coordinating waituntil synchronization 40 40 struct select_node { 41 int * park_counter;// If this is 0p then the node is in a special OR case waituntil42 43 44 void * extra;// used to store arbitrary data needed by some primitives45 46 47 41 int * park_counter; // If this is 0p then the node is in a special OR case waituntil 42 unsigned long int * clause_status; // needs to point at ptr sized location, if this is 0p then node is not part of a waituntil 43 44 void * extra; // used to store arbitrary data needed by some primitives 45 46 thread$ * blocked_thread; 47 inline dlink(select_node); 48 48 }; 49 49 P9_EMBEDDED( select_node, dlink(select_node) ) 50 50 51 51 static inline void ?{}( select_node & this ) { 52 53 54 55 52 this.blocked_thread = active_thread(); 53 this.clause_status = 0p; 54 this.park_counter = 0p; 55 this.extra = 0p; 56 56 } 57 57 58 58 static inline void ?{}( select_node & this, thread$ * blocked_thread ) { 59 60 61 62 59 this.blocked_thread = blocked_thread; 60 this.clause_status = 0p; 61 this.park_counter = 0p; 62 this.extra = 0p; 63 63 } 64 64 65 65 static inline void ?{}( select_node & this, thread$ * blocked_thread, void * extra ) { 66 67 68 69 66 this.blocked_thread = blocked_thread; 67 this.clause_status = 0p; 68 this.park_counter = 0p; 69 this.extra = extra; 70 70 } 71 71 static inline void ^?{}( select_node & this ) {} … … 76 76 // this is used inside the compiler to attempt to establish an else clause as a winner in the OR special case race 77 77 static inline bool __select_node_else_race( select_node & this ) with( this ) { 78 79 80 78 unsigned long int cmp_status = __SELECT_UNSAT; 79 return *clause_status == 0 80 && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ); 81 81 } 82 82 … … 85 85 forall(T & | sized(T)) 86 86 trait is_selectable { 87 88 89 90 91 92 93 94 95 96 //passed as an arg to this routine. If true is returned proceed as normal, if false is returned the statement is skipped97 87 // For registering a select stmt on a selectable concurrency primitive 88 // Returns bool that indicates if operation is already SAT 89 bool register_select( T &, select_node & ); 90 91 // For unregistering a select stmt on a selectable concurrency primitive 92 // If true is returned then the corresponding code block is run (only in non-special OR case and only if node status is not RUN) 93 bool unregister_select( T &, select_node & ); 94 95 // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node 96 // passed as an arg to this routine. If true is returned proceed as normal, if false is returned the statement is skipped 97 bool on_selected( T &, select_node & ); 98 98 }; 99 99 // Used inside the compiler to allow for overloading on return type for operations such as '?<<?' 
for channels … … 107 107 108 108 static inline void __make_select_node_unsat( select_node & this ) with( this ) { 109 109 __atomic_store_n( clause_status, __SELECT_UNSAT, __ATOMIC_SEQ_CST ); 110 110 } 111 111 static inline void __make_select_node_sat( select_node & this ) with( this ) { 112 112 __atomic_store_n( clause_status, __SELECT_SAT, __ATOMIC_SEQ_CST ); 113 113 } 114 114 115 115 // used for the 2-stage avail needed by the special OR case 116 116 static inline bool __mark_select_node( select_node & this, unsigned long int val ) with( this ) { 117 118 119 120 121 while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {122 123 124 125 117 /* paranoid */ verify( park_counter == 0p ); 118 /* paranoid */ verify( clause_status != 0p ); 119 120 unsigned long int cmp_status = __SELECT_UNSAT; 121 while( ! __atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) { 122 if ( cmp_status != __SELECT_PENDING ) return false; 123 cmp_status = __SELECT_UNSAT; 124 } 125 return true; 126 126 } 127 127 128 128 // used for the 2-stage avail by the thread who owns a pending node 129 129 static inline bool __pending_set_other( select_node & other, select_node & mine, unsigned long int val ) with( other ) { 130 131 132 133 134 while( !__atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {135 136 137 138 139 140 141 if ( !__atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )142 143 144 145 130 /* paranoid */ verify( park_counter == 0p ); 131 /* paranoid */ verify( clause_status != 0p ); 132 133 unsigned long int cmp_status = __SELECT_UNSAT; 134 while( ! __atomic_compare_exchange_n( clause_status, &cmp_status, val, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) { 135 if ( cmp_status != __SELECT_PENDING ) 136 return false; 137 138 // toggle current status flag to avoid starvation/deadlock 139 __make_select_node_unsat( mine ); 140 cmp_status = __SELECT_UNSAT; 141 if ( ! __atomic_compare_exchange_n( mine.clause_status, &cmp_status, __SELECT_PENDING, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) 142 return false; 143 cmp_status = __SELECT_UNSAT; 144 } 145 return true; 146 146 } 147 147 148 148 static inline bool __make_select_node_pending( select_node & this ) with( this ) { 149 149 return __mark_select_node( this, __SELECT_PENDING ); 150 150 } 151 151 … … 153 153 // return true if we want to unpark the thd 154 154 static inline bool __make_select_node_available( select_node & this ) with( this ) { 155 156 if( !park_counter )157 158 159 160 161 162 163 && !__atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST);155 /* paranoid */ verify( clause_status != 0p ); 156 if ( ! park_counter ) 157 return __mark_select_node( this, (unsigned long int)&this ); 158 159 unsigned long int cmp_status = __SELECT_UNSAT; 160 161 return *clause_status == 0 // C_TODO might not need a cmp_xchg in non special OR case 162 && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write 163 && ! 
__atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST); 164 164 } 165 165 166 166 // Handles the special OR case of the waituntil statement 167 167 // Since only one select node can win in the OR case, we need to race to set the node available BEFORE 168 // 168 // performing the operation since if we lose the race the operation should not be performed as it will be lost 169 169 // Returns true if execution can continue normally and false if the queue has now been drained 170 170 static inline bool __handle_waituntil_OR( dlist( select_node ) & queue ) { 171 if ( queue`isEmpty) return false;172 if ( queue`first.clause_status && !queue`first.park_counter ) {173 while ( !queue`isEmpty) {174 175 if ( !queue`first.clause_status || queue`first.park_counter || __make_select_node_available( queue`first) )176 177 178 try_pop_front( queue );179 180 181 182 171 if ( isEmpty( queue ) ) return false; 172 if ( first( queue ).clause_status && ! first( queue ).park_counter ) { 173 while ( ! isEmpty( queue ) ) { 174 // if node not a special OR case or if we win the special OR case race break 175 if ( ! first( queue ).clause_status || first( queue ).park_counter || __make_select_node_available( first( queue ) ) ) 176 return true; 177 // otherwise we lost the special OR race so discard node 178 remove_first( queue ); 179 } 180 return false; 181 } 182 return true; 183 183 } 184 184 185 185 // wake one thread from the list 186 186 static inline void wake_one( dlist( select_node ) & /*queue*/, select_node & popped ) { 187 if ( !popped.clause_status// normal case, node is not a select node188 || ( popped.clause_status && !popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available189 || __make_select_node_available( popped ) )// check if popped link belongs to a selecting thread190 191 } 192 193 static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, try_pop_front( queue ) ); }187 if ( ! popped.clause_status // normal case, node is not a select node 188 || ( popped.clause_status && ! popped.park_counter ) // If popped link is special case OR selecting unpark but don't call __make_select_node_available 189 || __make_select_node_available( popped ) ) // check if popped link belongs to a selecting thread 190 unpark( popped.blocked_thread ); 191 } 192 193 static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, remove_first( queue ) ); } 194 194 195 195 static inline void setup_clause( select_node & this, unsigned long int * clause_status, int * park_counter ) { 196 197 198 196 this.blocked_thread = active_thread(); 197 this.clause_status = clause_status; 198 this.park_counter = park_counter; 199 199 } 200 200 201 201 // waituntil ( timeout( ... ) ) support 202 202 struct select_timeout_node { 203 204 203 alarm_node_t a_node; 204 select_node * s_node; 205 205 }; 206 206 void ?{}( select_timeout_node & this, Duration duration, Alarm_Callback callback ); -
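A primitive becomes usable in waituntil by modelling the is_selectable trait above. A skeletal example for a primitive that is trivially always ready; always_ready is hypothetical, while select_node and __make_select_node_available are the definitions above:

    struct always_ready {};

    // Operation is immediately satisfiable: mark the node and report SAT.
    static inline bool register_select( always_ready &, select_node & node ) {
        if ( node.park_counter ) __make_select_node_available( node );
        return true;   // true => already SAT, so the selecting thread need not park
    }
    static inline bool unregister_select( always_ready &, select_node & ) {
        return false;  // nothing was queued, and no code block needs to run on removal
    }
    static inline bool on_selected( always_ready &, select_node & ) {
        return true;   // proceed with the statement attached to this clause
    }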
libcfa/src/executor.cfa
rf85de47 r65bd3c2 21 21 T * remove( Buffer(T, TLink) & mutex buf ) with(buf) { 22 22 dlist( T, TLink ) * qptr = &queue; // workaround https://cforall.uwaterloo.ca/trac/ticket/166 23 // if ( (*qptr)`isEmpty) wait( delay ); // no request to process ? => wait24 if ( (*qptr)`isEmpty ) return 0p;// no request to process ? => wait23 // if ( isEmpty( *qptr ) ) wait( delay ); // no request to process ? => wait 24 if ( isEmpty( *qptr ) ) return 0p; // no request to process ? => wait 25 25 return &try_pop_front( *qptr ); 26 26 } // remove … … 93 93 unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers; 94 94 // for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) { 95 96 95 for ( i; nworkers : start; 0u ~ @ ~ range : range; ) { 96 range = reqPerWorker + ( i < extras ? 1 : 0 ); 97 97 workers[i] = new( cluster, requests, start, range ); 98 98 } // for -
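The rewritten distribution loop leans on CFA's multi-control for: i counts workers, while start begins at 0u, is unbounded (@), and advances by range each iteration, with range recomputed in the body. Worked in isolation for 10 queues over 4 workers, the spans come out 0-2, 3-5, 6-7, 8-9:

    #include <fstream.hfa>

    int main() {
        unsigned int nrqueues = 10, nworkers = 4;
        unsigned int per = nrqueues / nworkers, extras = nrqueues % nworkers;
        for ( i; nworkers : start; 0u ~ @ ~ range : range; ) {
            range = per + ( i < extras ? 1 : 0 );
            sout | "worker" | i | "queues" | start | "to" | start + range - 1;
        }
    }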
src/Common/ScopedMap.hpp
rf85de47 r65bd3c2 10 10 // Created On : Wed Dec 2 11:37:00 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Tue Feb 15 08:41:28 202213 // Update Count : 512 // Last Modified On : Mon May 13 07:32:09 2024 13 // Update Count : 6 14 14 // 15 15 … … 31 31 class ScopedMap { 32 32 typedef std::map< Key, Value > MapType; 33 public: 33 34 struct Scope { 34 35 MapType map; … … 44 45 Scope & operator= (Scope &&) = default; 45 46 }; 47 private: 46 48 typedef std::vector< Scope > ScopeList; 47 49 -
src/Common/SemanticError.hpp
rf85de47 r65bd3c2 10 10 // Created On : Mon May 18 07:44:20 2015 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sun Dec 15 21:04:32 202413 // Update Count : 7 712 // Last Modified On : Tue Apr 1 11:11:33 2025 13 // Update Count : 79 14 14 // 15 15 … … 65 65 {"gcc-attributes" , Severity::Warn, "invalid attribute: %s." }, 66 66 {"c++-like-copy" , Severity::Warn, "Constructor from reference is not a valid copy constructor." }, 67 {"depreciated-trait-syntax" , Severity::Warn, "trait type-parameters are now specified using the forall clause." },67 {"depreciated-trait-syntax" , Severity::Warn, "trait type-parameters, trait name(T,U), now specified using forall clause, forall(T,U) trait name." }, 68 68 }; 69 69 -
tests/collections/atomic_mpsc.cfa
rf85de47 r65bd3c2 13 13 void ?{}(some_node & this) { this.next = 0p; } 14 14 15 static inline some_node * volatile & ?`next( some_node * node ) { 15 static inline some_node * volatile & next( some_node * node ) { 16 16 return node->next; 17 17 } -
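The same rename shows up at call sites: the postfix backtick form becomes an ordinary call, and because next returns a reference to the link field the result is still an assignable lvalue. A small sketch, assuming the some_node type above:

    #include <assert.h>

    void link_demo( void ) {
        some_node a, b;
        next( &a ) = &b;              // was: (&a)`next = &b
        assert( next( &a ) == &b );   // the returned lvalue is the stored link pointer
    }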
tests/list/dlist-insert-remove.cfa
rf85de47 r65bd3c2 59 59 do { 60 60 sout | f.adatum; 61 } while ( f`moveNext);61 } while (advance( f )); 62 62 } 63 63 … … 66 66 do { 67 67 sout | f.adatum; 68 } while ( f`movePrev);68 } while (recede( f )); 69 69 } 70 70 … … 94 94 do { 95 95 sout | f.adatum; 96 } while ( f`moveNext);96 } while (advance( f )); 97 97 } 98 98 … … 101 101 do { 102 102 sout | f.adatum; 103 } while ( f`movePrev);103 } while (recede( f )); 104 104 } 105 105 … … 127 127 do { 128 128 sout | m.anotherdatum; 129 } while ( m`moveNext);129 } while (advance( m )); 130 130 } 131 131 … … 133 133 do { 134 134 sout | m.anotherdatum; 135 } while ( m`movePrev);135 } while (recede( m )); 136 136 } 137 137 … … 423 423 dlist(fred, fred.mine) lf; 424 424 425 assert( & lf`first== 0p );426 assert( & lf`last== 0p );425 assert( &first( lf ) == 0p ); 426 assert( &last( lf ) == 0p ); 427 427 428 428 insert_first(lf, f1); 429 429 430 assert( & lf`first == &f1 );431 assert( & lf`last == &f1 );430 assert( &first( lf ) == &f1 ); 431 assert( &last( lf ) == &f1 ); 432 432 433 433 verify(validate(lf)); … … 440 440 printYourFreddies(f1, f2, 0); // 3.14; 3.14; 0.5; 0.5 (unmodified) 441 441 442 assert( & lf`first == &f1 );443 assert( & lf`last == &f2 );442 assert( &first( lf ) == &f1 ); 443 assert( &last( lf ) == &f2 ); 444 444 } 445 445 … … 451 451 dlist(fred, fred.yours) lf; 452 452 453 assert( & lf`first== 0p );454 assert( & lf`last== 0p );453 assert( &first( lf ) == 0p ); 454 assert( &last( lf ) == 0p ); 455 455 456 456 insert_first(lf, f1); 457 457 458 assert( & lf`first== & f1 );459 assert( & lf`last== & f1 );458 assert( &first( lf ) == & f1 ); 459 assert( &last( lf ) == & f1 ); 460 460 461 461 verify(validate(lf)); … … 468 468 printYourFreddies(f1, f2, 0); // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified) 469 469 470 assert( & lf`first== & f1 );471 assert( & lf`last== & f2 );470 assert( &first( lf ) == & f1 ); 471 assert( &last( lf ) == & f2 ); 472 472 } 473 473 … … 479 479 dlist(mary) lm; 480 480 481 assert( & lm`first== 0p );482 assert( & lm`last== 0p );481 assert( &first( lm ) == 0p ); 482 assert( &last( lm ) == 0p ); 483 483 484 484 insert_first(lm, m1); 485 485 486 assert( & lm`first== & m1 );487 assert( & lm`last== & m1 );486 assert( &first( lm ) == & m1 ); 487 assert( &last( lm ) == & m1 ); 488 488 489 489 verify(validate(lm)); … … 495 495 printMariatheotokos(m1, m2, 0); // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified) 496 496 497 assert( & lm`first== & m1 );498 assert( & lm`last== & m2 );497 assert( &first( lm ) == & m1 ); 498 assert( &last( lm ) == & m2 ); 499 499 } 500 500 … … 516 516 dlist(fred, fred.mine) lf; 517 517 518 assert( & lf`first== 0p );519 assert( & lf`last== 0p );518 assert( &first( lf ) == 0p ); 519 assert( &last( lf ) == 0p ); 520 520 521 521 insert_last(lf, f2); 522 522 523 assert( & lf`first== & f2 );524 assert( & lf`last== & f2 );523 assert( &first( lf ) == & f2 ); 524 assert( &last( lf ) == & f2 ); 525 525 526 526 verify(validate(lf)); … … 533 533 printYourFreddies(f1, f2, 0); // 3.14; 3.14; 0.5; 0.5 (unmodified) 534 534 535 assert( & lf`first== & f1 );536 assert( & lf`last== & f2 );535 assert( &first( lf ) == & f1 ); 536 assert( &last( lf ) == & f2 ); 537 537 } 538 538 … … 544 544 dlist(fred, fred.yours) lf; 545 545 546 assert( & lf`first== 0p );547 assert( & lf`last== 0p );546 assert( &first( lf ) == 0p ); 547 assert( &last( lf ) == 0p ); 548 548 549 549 insert_last(lf, f2); 550 550 551 assert( & lf`first== & f2 );552 assert( & lf`last== & f2 );551 assert( &first( lf ) == & f2 ); 552 assert( &last( lf ) == & f2 ); 
553 553 554 554 verify(validate(lf)); … … 561 561 printYourFreddies(f1, f2, 0); // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified) 562 562 563 assert( & lf`first== & f1 );564 assert( & lf`last== & f2 );563 assert( &first( lf ) == & f1 ); 564 assert( &last( lf ) == & f2 ); 565 565 } 566 566 … … 572 572 dlist(mary) lm; 573 573 574 assert( & lm`first== 0p );575 assert( & lm`last== 0p );574 assert( &first( lm ) == 0p ); 575 assert( &last( lm ) == 0p ); 576 576 577 577 insert_last(lm, m2); 578 578 579 assert( & lm`first== & m2 );580 assert( & lm`last== & m2 );579 assert( &first( lm ) == & m2 ); 580 assert( &last( lm ) == & m2 ); 581 581 582 582 verify(validate(lm)); … … 588 588 printMariatheotokos(m1, m2, 0); // 3.14, 0.5; 3.14; 0.5; 0.5, 3.14 (modified) 589 589 590 assert( & lm`first== & m1 );591 assert( & lm`last== & m2 );590 assert( &first( lm ) == & m1 ); 591 assert( &last( lm ) == & m2 ); 592 592 } 593 593 #if 0 … … 891 891 insert_last(fly, f3); 892 892 893 printMyFreddies(f lm`first, flm`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7894 printYourFreddies(f ly`first, fly`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7893 printMyFreddies(first( flm ), last( flm ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 894 printYourFreddies(first( fly ), last( fly ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 895 895 896 896 verify(validate(fly)); … … 902 902 verify(validate(flm)); 903 903 904 printMyFreddies(f lm`first, flm`last, 0); // 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)905 printYourFreddies(f ly`first, fly`last, 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)904 printMyFreddies(first( flm ), last( flm ), 0); // 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified) 905 printYourFreddies(first( fly ), last( fly ), 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified) 906 906 907 907 // observe f1 is now solo in mine; in yours, it was just traversed … … 930 930 insert_last(fly, f3); 931 931 932 printMyFreddies(f lm`first, flm`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7933 printYourFreddies(f ly`first, fly`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7932 printMyFreddies(first( flm ), last( flm ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 933 printYourFreddies(first( fly ), last( fly ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 934 934 935 935 verify(validate(fly)); … … 941 941 verify(validate(flm)); 942 942 943 printMyFreddies(f lm`first, flm`last, 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)944 printYourFreddies(f ly`first, fly`last, 0); // 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)943 printMyFreddies(first( flm ), last( flm ), 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified) 944 printYourFreddies(first( fly ), last( fly ), 0); // 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified) 945 945 946 946 // observe f1 is now solo in yours; in mine, it was just traversed … … 963 963 insert_last(ml, m3); 964 964 965 printMariatheotokos( ml`first, ml`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7965 printMariatheotokos(first( ml ), last( ml ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 966 966 967 967 verify(validate(ml)); … … 971 971 verify(validate(ml)); 972 972 973 printMariatheotokos( ml`first, ml`last, 0); // 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)973 printMariatheotokos(first( ml ), last( ml ), 0); // 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified) 974 974 975 975 // observe m1 is now solo … … 1007 1007 insert_last(fly, f3); 1008 1008 1009 printMyFreddies(f lm`first, flm`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.71010 printYourFreddies(f ly`first, 
fly`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.71009 printMyFreddies(first( flm ), last( flm ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 1010 printYourFreddies(first( fly ), last( fly ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 1011 1011 1012 1012 verify(validate(fly)); … … 1018 1018 verify(validate(flm)); 1019 1019 1020 printMyFreddies(f lm`first, flm`last, 0); // 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)1021 printYourFreddies(f ly`first, fly`last, 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)1020 printMyFreddies(first( flm ), last( flm ), 0); // 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified) 1021 printYourFreddies(first( fly ), last( fly ), 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified) 1022 1022 1023 1023 // observe f3 is now solo in mine; in yours, it was just traversed … … 1045 1045 insert_last(fly, f3); 1046 1046 1047 printMyFreddies(f lm`first, flm`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.71048 printYourFreddies(f ly`first, fly`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.71047 printMyFreddies(first( flm ), last( flm ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 1048 printYourFreddies(first( fly ), last( fly ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 1049 1049 1050 1050 verify(validate(fly)); … … 1056 1056 verify(validate(flm)); 1057 1057 1058 printMyFreddies(f lm`first, flm`last, 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)1059 printYourFreddies(f ly`first, fly`last, 0); // 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)1058 printMyFreddies(first( flm ), last( flm ), 0); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified) 1059 printYourFreddies(first( fly ), last( fly ), 0); // 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified) 1060 1060 1061 1061 // observe f3 is now solo in yours; in mine, it was just traversed … … 1078 1078 insert_last(ml, m3); 1079 1079 1080 printMariatheotokos( ml`first, ml`last, 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.71080 printMariatheotokos(first( ml ), last( ml ), 1); // 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 1081 1081 1082 1082 verify(validate(ml)); … … 1086 1086 verify(validate(ml)); 1087 1087 1088 printMariatheotokos( ml`first, ml`last, 0); // 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)1088 printMariatheotokos(first( ml ), last( ml ), 0); // 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified) 1089 1089 1090 1090 // observe m3 is now solo … … 1116 1116 insert_last(fly, f); 1117 1117 1118 printMyFreddies(f lm`first, flm`last, 1); // 0.7; 0.7; 0.7; 0.71119 printYourFreddies(f ly`first, fly`last, 1); // 0.7; 0.7; 0.7; 0.71118 printMyFreddies(first( flm ), last( flm ), 1); // 0.7; 0.7; 0.7; 0.7 1119 printYourFreddies(first( fly ), last( fly ), 1); // 0.7; 0.7; 0.7; 0.7 1120 1120 1121 1121 verify(validate(fly)); … … 1127 1127 verify(validate(flm)); 1128 1128 1129 assert( & flm`first== 0p );1130 assert( & flm`last== 0p );1131 1132 printYourFreddies(f ly`first, fly`last, 0); // 0.7; 0.7; 0.7; 0.7 (unmodified)1129 assert( &first( flm ) == 0p ); 1130 assert( &last( flm ) == 0p ); 1131 1132 printYourFreddies(first( fly ), last( fly ), 0); // 0.7; 0.7; 0.7; 0.7 (unmodified) 1133 1133 1134 1134 // observe f is solo in mine (now unlisted); in yours, it was just traversed … … 1142 1142 verify(validate(fly)); 1143 1143 verify(validate(flm)); 1144 printMyFreddies(f lm`first, flm`last, 0); // 0.7; 0.7; 0.7; 0.71144 printMyFreddies(first( flm ), last( flm ), 0); // 0.7; 0.7; 0.7; 0.7 1145 1145 } 1146 1146 … … 1155 1155 insert_last(fly, f); 1156 1156 1157 printMyFreddies(f lm`first, flm`last, 1); // 0.7; 0.7; 0.7; 0.71158 
-	printYourFreddies(fly`first, fly`last, 1);		// 0.7; 0.7; 0.7; 0.7
+	printMyFreddies(first( flm ), last( flm ), 1);		// 0.7; 0.7; 0.7; 0.7
+	printYourFreddies(first( fly ), last( fly ), 1);	// 0.7; 0.7; 0.7; 0.7

	verify(validate(fly));
…
	verify(validate(flm));

-	assert( & fly`first == 0p );
-	assert( & fly`last == 0p );
-
-	printYourFreddies(flm`first, flm`last, 0);		// 0.7; 0.7; 0.7; 0.7 (unmodified)
+	assert( &first( fly ) == 0p );
+	assert( &last( fly ) == 0p );
+
+	printYourFreddies(first( flm ), last( flm ), 0);	// 0.7; 0.7; 0.7; 0.7 (unmodified)

	// observe f is solo in yours (now unlisted); in mine, it was just traversed
…
	verify(validate(fly));
	verify(validate(flm));
-	printYourFreddies(fly`first, fly`last, 0);		// 0.7; 0.7; 0.7; 0.7
+	printYourFreddies(first( fly ), last( fly ), 0);	// 0.7; 0.7; 0.7; 0.7
}
…
	insert_last(ml, m);

-	printMariatheotokos(ml`first, ml`last, 1);		// 0.7; 0.7; 0.7; 0.7
+	printMariatheotokos(first( ml ), last( ml ), 1);	// 0.7; 0.7; 0.7; 0.7

	verify(validate(ml));
…
	verify(validate(ml));

-	assert( & ml`first == 0p );
-	assert( & ml`last == 0p );
+	assert( &first( ml ) == 0p );
+	assert( &last( ml ) == 0p );

	// observe f is solo in mine (now unlisted); in yours, it was just traversed
…
	insert_last(ml, m);
	verify(validate(ml));
-	printMariatheotokos(ml`first, ml`last, 0);		// 0.7; 0.7; 0.7; 0.7
+	printMariatheotokos(first( ml ), last( ml ), 0);	// 0.7; 0.7; 0.7; 0.7
}
…
	insert_last(fly, f3);

-	printMyFreddies(flm`first, flm`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-	printYourFreddies(fly`first, fly`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	fred & popped = try_pop_front(flm);
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	printMyFreddies(flm`first, flm`last, 0);		// 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)
-	printYourFreddies(fly`first, fly`last, 0);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)
+	printMyFreddies(first( flm ), last( flm ), 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+	printYourFreddies(first( fly ), last( fly ), 1);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	fred & popped = remove_first(flm);
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	printMyFreddies(first( flm ), last( flm ), 0);		// 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)
+	printYourFreddies(first( fly ), last( fly ), 0);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)

	// observe f1 is now solo in mine; in yours, it was just traversed
…
	insert_last(fly, f3);

-	printMyFreddies(flm`first, flm`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-	printYourFreddies(fly`first, fly`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	fred & popped = try_pop_front(fly);
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	printMyFreddies(flm`first, flm`last, 0);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)
-	printYourFreddies(fly`first, fly`last, 0);		// 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)
+	printMyFreddies(first( flm ), last( flm ), 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+	printYourFreddies(first( fly ), last( fly ), 1);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	fred & popped = remove_first(fly);
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	printMyFreddies(first( flm ), last( flm ), 0);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)
+	printYourFreddies(first( fly ), last( fly ), 0);	// 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)

	// observe f1 is now solo in yours; in mine, it was just traversed
…
	insert_last(ml, m3);

-	printMariatheotokos(ml`first, ml`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+	printMariatheotokos(first( ml ), last( ml ), 1);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7

	verify(validate(ml));

-	mary & popped = try_pop_front(ml);
+	mary & popped = remove_first(ml);

	verify(validate(ml));

-	printMariatheotokos(ml`first, ml`last, 0);		// 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)
+	printMariatheotokos(first( ml ), last( ml ), 0);	// 2.7, 3.7; 2.7; 3.7; 3.7, 2.7 (modified)

	// observe m1 is now solo
…
	insert_last(fly, f3);

-	printMyFreddies(flm`first, flm`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-	printYourFreddies(fly`first, fly`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	fred & popped = try_pop_back(flm);
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	printMyFreddies(flm`first, flm`last, 0);		// 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)
-	printYourFreddies(fly`first, fly`last, 0);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)
+	printMyFreddies(first( flm ), last( flm ), 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+	printYourFreddies(first( fly ), last( fly ), 1);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	fred & popped = remove_last(flm);
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	printMyFreddies(first( flm ), last( flm ), 0);		// 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)
+	printYourFreddies(first( fly ), last( fly ), 0);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)

	// observe f3 is now solo in mine; in yours, it was just traversed
…
	insert_last(fly, f3);

-	printMyFreddies(flm`first, flm`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-	printYourFreddies(fly`first, fly`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	fred & popped = try_pop_back(fly);
-
-	verify(validate(fly));
-	verify(validate(flm));
-
-	printMyFreddies(flm`first, flm`last, 0);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)
-	printYourFreddies(fly`first, fly`last, 0);		// 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)
+	printMyFreddies(first( flm ), last( flm ), 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+	printYourFreddies(first( fly ), last( fly ), 1);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	fred & popped = remove_last(fly);
+
+	verify(validate(fly));
+	verify(validate(flm));
+
+	printMyFreddies(first( flm ), last( flm ), 0);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7 (unmodified)
+	printYourFreddies(first( fly ), last( fly ), 0);	// 1.7, 2.7; 1.7; 2.7; 2.7, 1.7 (modified)

	// observe f3 is now solo in yours; in mine, it was just traversed
…
	insert_last(ml, m3);

-	printMariatheotokos(ml`first, ml`last, 1);		// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7
+	printMariatheotokos(first( ml ), last( ml ), 1);	// 1.7, 2.7, 3.7; 1.7; 3.7; 3.7, 2.7, 1.7

	verify(validate(ml));

-	mary & popped = try_pop_back(ml);
+	mary & popped = remove_last(ml);

	verify(validate(ml));

-	printMariatheotokos(ml`first, ml`last, 0);		// 1.7, 1.7; 1.7; 2.7; 2.7, 1.7 (modified)
+	printMariatheotokos(first( ml ), last( ml ), 0);	// 1.7, 1.7; 1.7; 2.7; 2.7, 1.7 (modified)

	// observe m1 is now solo
…
	// Section 4g
	//
-	// Test cases of `isEmpty, `hasPrev, `hasNext,
-	// try_pop_front, try_pop_back, modifications via `elems
+	// Test cases of isEmpty, isFirst, isLast,
+	// remove_first, remove_last, modifications via iter
	//
	// Example of call-side user code
…
	mary m3 = {3.7};

-	dlist(mary) ml; assert( ml`isEmpty );
-
-	insert_last(ml, m1); assert( !ml`isEmpty );
-	insert_last(ml, m2); assert( !ml`isEmpty );
-	insert_last(ml, m3); assert( !ml`isEmpty );
-
-	mary & m1prev = m1`prev;
-	mary & m1next = m1`next;
-	mary & m2prev = m2`prev;
-	mary & m2next = m2`next;
-	mary & m3prev = m3`prev;
-	mary & m3next = m3`next;
-
-	assert (&m1prev == 0p);
-	assert (&m1next == &m2);
-	assert (&m2prev == &m1);
-	assert (&m2next == &m3);
-	assert (&m3prev == &m2);
-	assert (&m3next == 0p);
-
-	assert( !m1`hasPrev );
-	assert( m1`hasNext );
-	assert( m2`hasPrev );
-	assert( m2`hasNext );
-	assert( m3`hasPrev );
-	assert( !m3`hasNext );
+	dlist(mary) ml; assert( isEmpty( ml ) );
+
+	insert_last(ml, m1); assert( !isEmpty( ml ) );
+	insert_last(ml, m2); assert( !isEmpty( ml ) );
+	insert_last(ml, m3); assert( !isEmpty( ml ) );
+
+	mary & m1prev = prev( m1 );
+	mary & m1next = next( m1 );
+	mary & m2prev = prev( m2 );
+	mary & m2next = next( m2 );
+	mary & m3prev = prev( m3 );
+	mary & m3next = next( m3 );
+
+	assert( &m1prev == 0p );
+	assert( &m1next == &m2 );
+	assert( &m2prev == &m1 );
+	assert( &m2next == &m3 );
+	assert( &m3prev == &m2 );
+	assert( &m3next == 0p );
+
+	assert( ! isFirst( m1 ) );
+	assert( isLast( m1 ) );
+	assert( isFirst( m2 ) );
+	assert( isLast( m2 ) );
+	assert( isFirst( m3 ) );
+	assert( ! isLast( m3 ) );

	printf("accessor_cases done\n");
…
	// queue, back to front

-	assert( ml`isEmpty );
+	assert( isEmpty( ml ) );

	insert_last(ml, m1);
…
	insert_last(ml, m3);

-	&m1r = & try_pop_front(ml); assert( !ml`isEmpty );
-	&m2r = & try_pop_front(ml); assert( !ml`isEmpty );
-	&m3r = & try_pop_front(ml); assert( ml`isEmpty );
-	&mxr = & try_pop_front(ml); assert( ml`isEmpty );
+	&m1r = & remove_first(ml); assert( !isEmpty( ml ) );
+	&m2r = & remove_first(ml); assert( !isEmpty( ml ) );
+	&m3r = & remove_first(ml); assert( isEmpty( ml ) );
+	&mxr = & remove_first(ml); assert( isEmpty( ml ) );

	assert( &m1r == &m1 );
…
	// queue, front to back

-	assert( ml`isEmpty );
+	assert( isEmpty( ml ) );

	insert_first(ml, m1);
…
	insert_first(ml, m3);

-	&m1r = & try_pop_back(ml); assert( !ml`isEmpty );
-	&m2r = & try_pop_back(ml); assert( !ml`isEmpty );
-	&m3r = & try_pop_back(ml); assert( ml`isEmpty );
-	&mxr = & try_pop_back(ml); assert( ml`isEmpty );
+	&m1r = & remove_last(ml); assert( !isEmpty( ml ) );
+	&m2r = & remove_last(ml); assert( !isEmpty( ml ) );
+	&m3r = & remove_last(ml); assert( isEmpty( ml ) );
+	&mxr = & remove_last(ml); assert( isEmpty( ml ) );

	assert( &m1r == &m1 );
…
	// stack at front

-	assert( ml`isEmpty );
+	assert( isEmpty( ml ) );

	insert_first(ml, m1);
…
	insert_first(ml, m3);

-	&m3r = & try_pop_front(ml); assert( !ml`isEmpty );
-	&m2r = & try_pop_front(ml); assert( !ml`isEmpty );
-	&m1r = & try_pop_front(ml); assert( ml`isEmpty );
-	&mxr = & try_pop_front(ml); assert( ml`isEmpty );
+	&m3r = & remove_first(ml); assert( !isEmpty( ml ) );
+	&m2r = & remove_first(ml); assert( !isEmpty( ml ) );
+	&m1r = & remove_first(ml); assert( isEmpty( ml ) );
+	&mxr = & remove_first(ml); assert( isEmpty( ml ) );

	assert( &m1r == &m1 );
…
	// stack at back

-	assert( ml`isEmpty );
+	assert( isEmpty( ml ) );

	insert_last(ml, m1);
…
	insert_last(ml, m3);

-	&m3r = & try_pop_back(ml); assert( !ml`isEmpty );
-	&m2r = & try_pop_back(ml); assert( !ml`isEmpty );
-	&m1r = & try_pop_back(ml); assert( ml`isEmpty );
-	&mxr = & try_pop_back(ml); assert( ml`isEmpty );
+	&m3r = & remove_last(ml); assert( !isEmpty( ml ) );
+	&m2r = & remove_last(ml); assert( !isEmpty( ml ) );
+	&m1r = & remove_last(ml); assert( isEmpty( ml ) );
+	&mxr = & remove_last(ml); assert( isEmpty( ml ) );

	assert( &m1r == &m1 );
…
	dlist(mary) ml;
-	mary & mlorigin = ml`elems;
+	mary & mlorigin = iter( ml );

	// insert before the origin

-	insert_before( ml`elems, m1 );
-	assert( ! ml`isEmpty );
-
-	mary & mlfirst = ml`first;
-	mary & mllast = ml`last;
+	insert_before( iter( ml ), m1 );
+	assert( ! isEmpty( ml ) );
+
+	mary & mlfirst = first( ml );
+	mary & mllast = last( ml );

	assert( &m1 == & mlfirst );
…
	// moveNext after last goes back to origin, &vv

-	bool canMoveNext = mllast`moveNext;
-	bool canMovePrev = mlfirst`movePrev;
+	bool canMoveNext = advance( mllast );
+	bool canMovePrev = recede( mlfirst );

	assert( ! canMoveNext );
…
void test__isListed_cases__mary() {

-	mary m1 = {1.7}; assert( ! m1`isListed );
-	mary m2 = {2.7}; assert( ! m2`isListed );
-	mary m3 = {3.7}; assert( ! m3`isListed );
+	mary m1 = {1.7}; assert( ! isListed( m1 ) );
+	mary m2 = {2.7}; assert( ! isListed( m2 ) );
+	mary m3 = {3.7}; assert( ! isListed( m3 ) );

	dlist(mary) ml;

-	insert_last(ml, m1); assert( m1`isListed ); assert( !m2`isListed );
-	insert_last(ml, m2); assert( m2`isListed ); assert( !m3`isListed );
-	insert_last(ml, m3); assert( m3`isListed );
-
-	remove( m1 ); assert( ! m1`isListed ); assert( m2`isListed );
-	remove( m2 ); assert( ! m2`isListed ); assert( m3`isListed );
-	remove( m3 ); assert( ! m3`isListed );
+	insert_last(ml, m1); assert( isListed( m1 ) ); assert( !isListed( m2 ) );
+	insert_last(ml, m2); assert( isListed( m2 ) ); assert( !isListed( m3 ) );
+	insert_last(ml, m3); assert( isListed( m3 ) );
+
+	remove( m1 ); assert( ! isListed( m1 ) ); assert( isListed( m2 ) );
+	remove( m2 ); assert( ! isListed( m2 ) ); assert( isListed( m3 ) );
+	remove( m3 ); assert( ! isListed( m3 ) );

	printf("isListed cases done\n");
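Taken together, the hunks above cover the whole surface of the rename: postfix backtick accessors such as l`first, l`isEmpty, and n`moveNext become the ordinary calls first( l ), isEmpty( l ), and advance( n ), and try_pop_front/try_pop_back become remove_first/remove_last. The following call-side sketch of the new spellings is illustrative only; it is not part of the changeset, and it assumes the same intrusive dlink embedding the tests use for fred/mary:

#include <fstream.hfa>
#include <collections/list.hfa>

struct item {
	float datum;
	inline dlink(item);					// intrusive links, as in the tests' element types
};

int main() {
	item i1 = { 1.7 }, i2 = { 2.7 };
	dlist(item) l;						// isEmpty( l ) holds here
	insert_last( l, i1 );
	insert_last( l, i2 );
	for ( item & it = iter( l ); advance( it ); ) {		// was: l`elems / it`moveNext
		sout | it.datum;				// prints 1.7 then 2.7
	}
	item & popped = remove_first( l );	// was: try_pop_front( l )
	sout | popped.datum | isEmpty( l );	// 1.7 false
}

The iteration idiom itself is unchanged by the rename: advance( it ) both moves the reference and reports whether it still denotes an element, so it doubles as the loop guard.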
tests/zombies/hashtable.cfa
rf85de47 r65bd3c2

	dlist(tN, tE) & bucket = buckets[ bucket_of(this, k) ];

-	for ( tN * item = & $tempcv_e2n( bucket`first ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
+	for ( tN * item = & $tempcv_e2n( first( bucket ) ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
		if ( key(*item) == k ) {
			return *item;
…
	dlist(tN, tE) & bucket = buckets[ bucket_of(this, k) ];

-	for ( tN * item = & $tempcv_e2n( bucket`first ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
+	for ( tN * item = & $tempcv_e2n( first( bucket ) ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
		if ( key(*item) == k ) {
			remove(*item);
tests/zombies/hashtable2.cfa
rf85de47 r65bd3c2

	dlist(request_in_ht_by_src, request) & bucket = buckets[ bucket_of(this, k) ];

-	for ( request_in_ht_by_src * item = & $tempcv_e2n( bucket`first ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
+	for ( request_in_ht_by_src * item = & $tempcv_e2n( first( bucket ) ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
		if ( key(*item) == k ) {
			return *item;
…
	dlist(request_in_ht_by_src, request) & bucket = buckets[ bucket_of(this, k) ];

-	for ( request_in_ht_by_src * item = & $tempcv_e2n( bucket`first ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
+	for ( request_in_ht_by_src * item = & $tempcv_e2n( first( bucket ) ); item != 0p; item = & $tempcv_e2n( (*item)`next ) ) {
		if ( key(*item) == k ) {
			remove(*item);
…
	// will re-implement as an actual splice
-	while ( & src_to_empty`first != 0p ) {
+	while ( & first( src_to_empty ) != 0p ) {
		insert_last( snk_to_fill_at_last, pop_first( src_to_empty ) );
	}
…
	// fill new table with old items
-	while ( & items`first != 0p ) {
+	while ( & first( items ) != 0p ) {
		put( this, pop_first( items ) );
	}
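The pop/insert loop above, which the author flags for later replacement by an actual splice, generalizes to any pair of lists over the same embedding. A minimal sketch of that loop as a standalone helper follows; moveAll is a hypothetical name, not in the changeset, and it uses the renamed single-list API from collections/list.hfa rather than this zombie test's own pop_first:

forall( tE &, tLinks & | embedded( tE, tLinks, dlink( tE ) ) )
static inline void moveAll( dlist( tE, tLinks ) & src, dlist( tE, tLinks ) & snk ) {
	// O(n) element-at-a-time transfer; an "actual splice" would relink
	// the two list headers in O(1), which is what the comment anticipates
	while ( & first( src ) != 0p ) {
		insert_last( snk, remove_first( src ) );
	}
}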
tests/zombies/linked-list-perf/experiment.koad
rf85de47 r65bd3c2

	for ( volatile unsigned int t = 0; t < Times; t += 1 ) {
		Repeat( insert_last( lst, s[i] ) );
-		Repeat( remove( lst`first ) );
+		Repeat( remove( first( lst ) ) );
	}
	end = clock();
…
	for ( volatile unsigned int t = 0; t < Times; t += 1 ) {
		Repeat( insert_last( lst, s[i] ) );
-		Repeat( remove( lst`first ) );
+		Repeat( remove( first( lst ) ) );
	}
	end = clock();
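For context, the experiment's timed loop measures steady-state churn: each iteration appends one node and immediately unlinks the head, so the list stays short while both the link and unlink paths are exercised. A self-contained sketch of the same measurement against the renamed API follows; Times, the elem type, and the reported unit are illustrative, and the real harness instead wraps the body in its Repeat macro over an array s[i]:

#include <time.h>
#include <stdio.h>
#include <collections/list.hfa>

struct elem { inline dlink(elem); };

int main() {
	enum { Times = 10000000 };
	elem e;
	dlist(elem) lst;
	clock_t start = clock();
	for ( volatile unsigned int t = 0; t < Times; t += 1 ) {
		insert_last( lst, e );				// link one node at the tail
		remove( first( lst ) );				// unlink it again at the head
	}
	clock_t end = clock();
	printf( "%g ns per insert/remove pair\n",
			(end - start) * 1e9 / CLOCKS_PER_SEC / Times );
}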
tests/zombies/linked-list-perf/mike-old.hfa
rf85de47 r65bd3c2

// Author : Michael Brooks
// Created On : Wed Apr 22 18:00:00 2020
-// Last Modified By : Michael Brooks
-// Last Modified On : Wed Apr 22 18:00:00 2020
-// Update Count : 1
+// Last Modified By : Peter A. Buhr
+// Last Modified On : Mon Apr 21 17:32:37 2025
+// Update Count : 3
//
…
}

-static inline Telem & ?`first( dlist(Tnode, Telem) &l ) {
+static inline Telem & first( dlist(Tnode, Telem) &l ) {
	return * l.$links.next.elem;
}
…
#if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
static bool $validate_fwd( dlist(Tnode, Telem) & this ) {
-	Tnode * it = & $tempcv_e2n( this`first );
+	Tnode * it = & $tempcv_e2n( first( this ) );
	if (!it) return (& this`last == 0p);
…
static bool $validate_rev( dlist(Tnode, Telem) & this ) {
	Tnode * it = & $tempcv_e2n( this`last );
-	if (!it) return (& this`first == 0p);
+	if (!it) return (& first( this ) == 0p);

	while( $prev_link(*it).elem ) {
…
	}

-	return ( it == & $tempcv_e2n( this`first ) ) &&
+	return ( it == & $tempcv_e2n( first( this ) ) ) &&
		( $prev_link(*it).is_terminator ) &&
		( ((dlist(Tnode, Telem)*)$prev_link(*it).terminator) == &this );
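This hunk makes the mechanics of the rename visible: ?`first is simply a function whose name enables the postfix call l`first, so renaming it to plain first changes the call spelling and nothing else; note this file still keeps the backtick form for this`last. A standalone illustration of that naming device follows, using a made-up ?`doubled operator that is unrelated to the list API:

#include <fstream.hfa>

struct box { int v; };

// a function named ?`doubled may be called with postfix-backtick syntax
static inline int ?`doubled( box & b ) { return 2 * b.v; }

int main() {
	box b = { 21 };
	sout | b`doubled;			// postfix spelling, prints 42
	sout | ?`doubled( b );		// same function called by name
}

Both calls resolve to the same function; the changeset simply stops using the backtick spelling for the main list accessors.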