Changeset c083c3d
- Timestamp: May 1, 2023, 6:15:26 PM (19 months ago)
- Branches: ADT, ast-experimental, master
- Children: 67408114
- Parents: 4daf79f (diff), 6e1e2d0 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- git-author: Peter A. Buhr <pabuhr@…> (05/01/23 16:59:14)
- git-committer: Peter A. Buhr <pabuhr@…> (05/01/23 18:15:26)
- Files: 12 added, 5 deleted, 34 edited
doc/theses/colby_parsons_MMAth/benchmarks/actors/data/nasus_CFA.txt
r4daf79f → rc083c3d
Label rename only; the timing tables (proc vs. time (s)) in the five benchmark
sections (executor, matrix, repeat, balance_one, balance_multi) are unchanged.
Header line:
-CFA-LV CFA-NS CFA-R
+Longest-Victim No-Stealing Random
Within each section:
-CFA-LV:
+Longest-Victim:
-CFA-NS:
+No-Stealing:
-CFA-R:
+Random:
doc/theses/colby_parsons_MMAth/benchmarks/actors/data/pyke_CFA.txt
r4daf79f → rc083c3d
Same label rename as nasus_CFA.txt; all timing data (executor, matrix, repeat,
balance_one, balance_multi sections) is unchanged.
-CFA-LV CFA-NS CFA-R
+Longest-Victim No-Stealing Random
-CFA-LV:
+Longest-Victim:
-CFA-NS:
+No-Stealing:
-CFA-R:
+Random:
doc/theses/colby_parsons_MMAth/benchmarks/channels/plotData.py
r4daf79f → rc083c3d
@@ lines 70-74 @@
 if currBench == Bench.Unset:
     if line == "contend:":
-        name = "Contend"
+        name = "Channel Contention"
         currBench = Bench.Contend
     elif line == "zero:":
doc/theses/colby_parsons_MMAth/code/swap_queues.cfa
r4daf79f → rc083c3d

-// this is a code stub and will not compile
-
-// tries to atomically swap two queues and returns 0p if the swap failed
-// returns ptr to newly owned queue if swap succeeds
-static inline work_queue * try_swap_queues( worker & this, unsigned int victim_idx, unsigned int my_idx ) with(this) {
+// sequential equivalent swap
+void swap( uint victim_idx, uint my_idx ) {
+	// Step 0:
 	work_queue * my_queue = request_queues[my_idx];
-	work_queue * other_queue = request_queues[victim_idx];
-
-	// if either queue is 0p then they are in the process of being stolen
-	if ( other_queue == 0p || my_queue == 0p ) return 0p;
-
-	// try to set our queue ptr to be 0p. If it fails someone moved our queue so return false
-	if ( !__atomic_compare_exchange_n( &request_queues[my_idx], &my_queue, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
-		return 0p;
-
-	// try to set other queue ptr to be our queue ptr. If it fails someone moved the other queue so fix up then return false
-	if ( !__atomic_compare_exchange_n( &request_queues[victim_idx], &other_queue, my_queue, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
-		/* paranoid */ verify( request_queues[my_idx] == 0p );
-		request_queues[my_idx] = my_queue;	// reset my queue ptr back to appropriate val
-		return 0p;
-	}
-
-	// we have successfully swapped and since our queue is 0p no one will touch it so write back new queue ptr non atomically
-	request_queues[my_idx] = other_queue;	// last write does not need to be atomic
-	return other_queue;
+	work_queue * vic_queue = request_queues[victim_idx];
+	// Step 2:
+	request_queues[my_idx] = 0p;
+	// Step 3:
+	request_queues[victim_idx] = my_queue;
+	// Step 4:
+	request_queues[my_idx] = vic_queue;
 }
...
 bool try_swap_queues( worker & this, uint victim_idx, uint my_idx ) with(this) {
+	// Step 0:
+	// request_queues is the shared array of all sharded queues
 	work_queue * my_queue = request_queues[my_idx];
 	work_queue * vic_queue = request_queues[victim_idx];

+	// Step 1:
 	// If either queue is 0p then they are in the process of being stolen
 	// 0p is CForAll's equivalent of C++'s nullptr
-	if ( vic_queue == 0p || my_queue == 0p ) return false;
+	if ( vic_queue == 0p ) return false;

-	// Try to set our queue ptr to be 0p.
-	// If this CAS fails someone moved our queue so return false
+	// Step 2:
+	// Try to set thief's queue ptr to be 0p.
+	// If this CAS fails someone stole thief's queue so return false
 	if ( !CAS( &request_queues[my_idx], &my_queue, 0p ) )
 		return false;

-	// Try to set other queue ptr to be our queue ptr.
-	// If it fails someone moved the other queue, so fix up then return false
+	// Step 3:
+	// Try to set victim queue ptr to be thief's queue ptr.
+	// If it fails someone stole the other queue, so fix up then return false
 	if ( !CAS( &request_queues[victim_idx], &vic_queue, my_queue ) ) {
 		request_queues[my_idx] = my_queue;	// reset queue ptr back to prev val
 		return false;
 	}

+	// Step 4:
 	// Successfully swapped.
-	// Our queue is 0p so no one will touch it so write back without CAS is safe
+	// Thief's ptr is 0p so no one will touch it
+	// Write back without CAS is safe
 	request_queues[my_idx] = vic_queue;
 	return true;
 }
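The two CAS steps above give a pairwise pointer swap without needing a double-width CAS: Step 2 zeroes the thief's own slot, which acts as a claim, so the only remaining race is the single CAS on the victim's slot in Step 3, and a failure there can be undone locally because no other thief touches a 0p slot. A minimal sketch of how a work-stealing thief might drive this routine; the steal_once wrapper, nworkers, and the prng helper are illustrative assumptions, not part of this changeset:

	// Hypothetical driver for try_swap_queues: pick a random victim and attempt one steal.
	static inline bool steal_once( worker & this, unsigned int nworkers, unsigned int my_idx ) with(this) {
		unsigned int victim_idx = prng() % nworkers;	// assumed PRNG helper
		if ( victim_idx == my_idx ) return false;	// never steal from self
		// on success the thief now owns the victim's old queue via request_queues[my_idx]
		return try_swap_queues( this, victim_idx, my_idx );
	}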
doc/theses/colby_parsons_MMAth/version.sh
- Property mode changed from 100755 to 100644
libcfa/src/bits/weakso_locks.cfa
r4daf79f → rc083c3d
@@ lines 15-21 @@ (blank lines removed)
 // Update Count :
 //
-
 #include "bits/weakso_locks.hfa"
-
 #pragma GCC visibility push(default)
...
@@ lines 30-33 @@ (waituntil stubs added)
 void on_wakeup( blocking_lock &, size_t ) {}
 size_t wait_count( blocking_lock & ) { return 0; }
+bool register_select( blocking_lock & this, select_node & node ) { return false; }
+bool unregister_select( blocking_lock & this, select_node & node ) { return false; }
+bool on_selected( blocking_lock & this, select_node & node ) { return true; }
libcfa/src/bits/weakso_locks.hfa
r4daf79f → rc083c3d

 #include "containers/list.hfa"

-struct thread$;
+struct select_node;
...
 	// List of blocked threads
-	dlist( thread$ ) blocked_threads;
+	dlist( select_node ) blocked_threads;

 	// Count of current blocked threads
...
 void on_wakeup( blocking_lock & this, size_t ) OPTIONAL_THREAD;
 size_t wait_count( blocking_lock & this ) OPTIONAL_THREAD;
+bool register_select( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;
+bool unregister_select( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;
+bool on_selected( blocking_lock & this, select_node & node ) OPTIONAL_THREAD;
...
 static inline void on_wakeup( multiple_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void on_notify( multiple_acquisition_lock & this, struct thread$ * t ){ on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( multiple_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( multiple_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( multiple_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
libcfa/src/concurrency/channel.hfa
r4daf79f → rc083c3d

 #include <list.hfa>
 #include <mutex_stmt.hfa>
-
-// link field used for threads waiting on channel
-struct wait_link {
-	// used to put wait_link on a dl queue
-	inline dlink(wait_link);
-
-	// waiting thread
-	struct thread$ * t;
-
-	// shadow field
-	void * elem;
-};
-P9_EMBEDDED( wait_link, dlink(wait_link) )
-
-static inline void ?{}( wait_link & this, thread$ * t, void * elem ) {
-	this.t = t;
-	this.elem = elem;
-}
-
-// wake one thread from the list
-static inline void wake_one( dlist( wait_link ) & queue ) {
-	wait_link & popped = try_pop_front( queue );
-	unpark( popped.t );
-}
+#include "select.hfa"

 // returns true if woken due to shutdown
 // blocks thread on list and releases passed lock
-static inline bool block( dlist( wait_link ) & queue, void * elem_ptr, go_mutex & lock ) {
-	wait_link w{ active_thread(), elem_ptr };
-	insert_last( queue, w );
+static inline bool block( dlist( select_node ) & queue, void * elem_ptr, go_mutex & lock ) {
+	select_node sn{ active_thread(), elem_ptr };
+	insert_last( queue, sn );
 	unlock( lock );
 	park();
-	return w.elem == 0p;
+	return sn.extra == 0p;
+}
+
+// Waituntil support (un)register_select helper routine
+// Sets select node avail if not special OR case and then unlocks
+static inline void __set_avail_then_unlock( select_node & node, go_mutex & mutex_lock ) {
+	if ( node.park_counter ) __make_select_node_available( node );
+	unlock( mutex_lock );
 }
...
 	size_t size, front, back, count;
 	T * buffer;
-	dlist( wait_link ) prods, cons;	// lists of blocked threads
+	dlist( select_node ) prods, cons;	// lists of blocked threads
 	go_mutex mutex_lock;		// MX lock
 	bool closed;			// indicates channel close/open
...
 	size = _size;
 	front = back = count = 0;
-	buffer = aalloc( size );
+	if ( size != 0 ) buffer = aalloc( size );
 	prods{};
 	cons{};
...
 	verifyf( cons`isEmpty && prods`isEmpty, "Attempted to delete channel with waiting threads (Deadlock).\n" );
-	delete( buffer );
+	if ( size != 0 ) delete( buffer );
 }
 static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
...
 	// flush waiting consumers and producers
 	while ( has_waiting_consumers( chan ) ) {
-		cons`first.elem = 0p;
+		if( !__handle_waituntil_OR( cons ) )	// ensure we only signal special OR case threads when they win the race
+			break;	// if __handle_waituntil_OR returns false cons is empty so break
+		cons`first.extra = 0p;
 		wake_one( cons );
 	}
 	while ( has_waiting_producers( chan ) ) {
-		prods`first.elem = 0p;
+		if( !__handle_waituntil_OR( prods ) )	// ensure we only signal special OR case threads when they win the race
+			break;	// if __handle_waituntil_OR returns false prods is empty so break
+		prods`first.extra = 0p;
 		wake_one( prods );
 	}
...
 static inline void is_closed( channel(T) & chan ) with(chan) { return closed; }

+// used to hand an element to a blocked consumer and signal it
+static inline void __cons_handoff( channel(T) & chan, T & elem ) with(chan) {
+	memcpy( cons`first.extra, (void *)&elem, sizeof(T) );	// do waiting consumer work
+	wake_one( cons );
+}
+
+// used to hand an element to a blocked producer and signal it
+static inline void __prods_handoff( channel(T) & chan, T & retval ) with(chan) {
+	memcpy( (void *)&retval, prods`first.extra, sizeof(T) );
+	wake_one( prods );
+}
+
 static inline void flush( channel(T) & chan, T elem ) with(chan) {
 	lock( mutex_lock );
 	while ( count == 0 && !cons`isEmpty ) {
-		memcpy(cons`first.elem, (void *)&elem, sizeof(T));	// do waiting consumer work
-		wake_one( cons );
+		__cons_handoff( chan, elem );
 	}
 	unlock( mutex_lock );
...
 // handles buffer insert
 static inline void __buf_insert( channel(T) & chan, T & elem ) with(chan) {
-	memcpy((void *)&buffer[back], (void *)&elem, sizeof(T));
+	memcpy( (void *)&buffer[back], (void *)&elem, sizeof(T) );
 	count += 1;
 	back++;
...
 }

-// does the buffer insert or hands elem directly to consumer if one is waiting
-static inline void __do_insert( channel(T) & chan, T & elem ) with(chan) {
-	if ( count == 0 && !cons`isEmpty ) {
-		memcpy(cons`first.elem, (void *)&elem, sizeof(T));	// do waiting consumer work
-		wake_one( cons );
-	} else __buf_insert( chan, elem );
-}
-
 // needed to avoid an extra copy in closed case
 static inline bool __internal_try_insert( channel(T) & chan, T & elem ) with(chan) {
...
 	operations++;
 	#endif
+
+	ConsEmpty: if ( !cons`isEmpty ) {
+		if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
+		__cons_handoff( chan, elem );
+		unlock( mutex_lock );
+		return true;
+	}
+
 	if ( count == size ) { unlock( mutex_lock ); return false; }
-	__do_insert( chan, elem );
+
+	__buf_insert( chan, elem );
 	unlock( mutex_lock );
 	return true;
...
 // handles closed case of insert routine
 static inline void __closed_insert( channel(T) & chan, T & elem ) with(chan) {
-	channel_closed except{&channel_closed_vt, &elem, &chan };
+	channel_closed except{ &channel_closed_vt, &elem, &chan };
 	throwResume except;	// throw closed resumption
 	if ( !__internal_try_insert( chan, elem ) ) throw except;	// if try to insert fails (would block), throw termination
...
 	}

-	// have to check for the zero size channel case
-	if ( size == 0 && !cons`isEmpty ) {
-		memcpy(cons`first.elem, (void *)&elem, sizeof(T));
-		wake_one( cons );
-		unlock( mutex_lock );
-		return true;
+	// buffer count must be zero if cons are blocked (also handles zero-size case)
+	ConsEmpty: if ( !cons`isEmpty ) {
+		if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
+		__cons_handoff( chan, elem );
+		unlock( mutex_lock );
+		return;
 	}
...
 	} // if

-	if ( count == 0 && !cons`isEmpty ) {
-		memcpy(cons`first.elem, (void *)&elem, sizeof(T));	// do waiting consumer work
-		wake_one( cons );
-	} else __buf_insert( chan, elem );
-
-	unlock( mutex_lock );
-	return;
-}
-
-// handles buffer remove
-static inline void __buf_remove( channel(T) & chan, T & retval ) with(chan) {
-	memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
+	__buf_insert( chan, elem );
+	unlock( mutex_lock );
+}
+
+// does the buffer remove and potentially does waiting producer work
+static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
+	memcpy( (void *)&retval, (void *)&buffer[front], sizeof(T) );
 	count -= 1;
 	front = (front + 1) % size;
-}
-
-// does the buffer remove and potentially does waiting producer work
-static inline void __do_remove( channel(T) & chan, T & retval ) with(chan) {
-	__buf_remove( chan, retval );
 	if (count == size - 1 && !prods`isEmpty ) {
-		__buf_insert( chan, *(T *)prods`first.elem );	// do waiting producer work
+		if ( !__handle_waituntil_OR( prods ) ) return;
+		__buf_insert( chan, *(T *)prods`first.extra );	// do waiting producer work
 		wake_one( prods );
 	}
...
 	operations++;
 	#endif
+
+	ZeroSize: if ( size == 0 && !prods`isEmpty ) {
+		if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
+		__prods_handoff( chan, retval );
+		unlock( mutex_lock );
+		return true;
+	}
+
 	if ( count == 0 ) { unlock( mutex_lock ); return false; }
+
 	__do_remove( chan, retval );
 	unlock( mutex_lock );
...
 static inline [T, bool] try_remove( channel(T) & chan ) {
 	T retval;
-	return [ retval, __internal_try_remove( chan, retval ) ];
-}
-
-static inline T try_remove( channel(T) & chan, T elem ) {
+	bool success = __internal_try_remove( chan, retval );
+	return [ retval, success ];
+}
+
+static inline T try_remove( channel(T) & chan ) {
 	T retval;
 	__internal_try_remove( chan, retval );
...
 // handles closed case of insert routine
 static inline void __closed_remove( channel(T) & chan, T & retval ) with(chan) {
-	channel_closed except{&channel_closed_vt, 0p, &chan };
+	channel_closed except{ &channel_closed_vt, 0p, &chan };
 	throwResume except;	// throw resumption
 	if ( !__internal_try_remove( chan, retval ) ) throw except;	// if try to remove fails (would block), throw termination
...
 	// have to check for the zero size channel case
-	if ( size == 0 && !prods`isEmpty ) {
-		memcpy((void *)&retval, (void *)prods`first.elem, sizeof(T));
-		wake_one( prods );
+	ZeroSize: if ( size == 0 && !prods`isEmpty ) {
+		if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
+		__prods_handoff( chan, retval );
 		unlock( mutex_lock );
 		return retval;
...
 	// wait if buffer is empty, work will be completed by someone else
-	if (count == 0) {
+	if ( count == 0 ) {
 		#ifdef CHAN_STATS
 		blocks++;
...
 	// Remove from buffer
 	__do_remove( chan, retval );
-
 	unlock( mutex_lock );
 	return retval;
 }
+
+///////////////////////////////////////////////////////////////////////////////////////////
+// The following is support for waituntil (select) statements
+///////////////////////////////////////////////////////////////////////////////////////////
+static inline bool unregister_chan( channel(T) & chan, select_node & node ) with(chan) {
+	if ( !node`isListed && !node.park_counter ) return false;	// handle special OR case
+	lock( mutex_lock );
+	if ( node`isListed ) {	// op wasn't performed
+		#ifdef CHAN_STATS
+		operations--;
+		#endif
+		remove( node );
+		unlock( mutex_lock );
+		return false;
+	}
+	unlock( mutex_lock );
+
+	// only return true when not special OR case, not exceptional case and status is SAT
+	return ( node.extra == 0p || !node.park_counter ) ? false : *node.clause_status == __SELECT_SAT;
+}
+
+// type used by select statement to capture a chan read as the selected operation
+struct chan_read {
+	channel(T) & chan;
+	T & ret;
+};
+
+static inline void ?{}( chan_read(T) & cr, channel(T) & chan, T & ret ) {
+	&cr.chan = &chan;
+	&cr.ret = &ret;
+}
+static inline chan_read(T) ?<<?( T & ret, channel(T) & chan ) { chan_read(T) cr{ chan, ret }; return cr; }
+
+static inline void __handle_select_closed_read( chan_read(T) & this, select_node & node ) with(this.chan, this) {
+	__closed_remove( chan, ret );
+	// if we get here then the remove succeeded
+	__make_select_node_available( node );
+}
+
+static inline bool register_select( chan_read(T) & this, select_node & node ) with(this.chan, this) {
+	lock( mutex_lock );
+	node.extra = &ret;	// set .extra so that if it == 0p later in on_selected it is due to channel close
+
+	#ifdef CHAN_STATS
+	if ( !closed ) operations++;
+	#endif
+
+	// check if we can complete operation. If so race to establish winner in special OR case
+	if ( !node.park_counter && ( count != 0 || !prods`isEmpty || unlikely(closed) ) ) {
+		if ( !__make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
+			unlock( mutex_lock );
+			return false;
+		}
+	}
+
+	if ( unlikely(closed) ) {
+		unlock( mutex_lock );
+		__handle_select_closed_read( this, node );
+		return true;
+	}
+
+	// have to check for the zero size channel case
+	ZeroSize: if ( size == 0 && !prods`isEmpty ) {
+		if ( !__handle_waituntil_OR( prods ) ) break ZeroSize;
+		__prods_handoff( chan, ret );
+		__set_avail_then_unlock( node, mutex_lock );
+		return true;
+	}
+
+	// wait if buffer is empty, work will be completed by someone else
+	if ( count == 0 ) {
+		#ifdef CHAN_STATS
+		blocks++;
+		#endif
+
+		insert_last( cons, node );
+		unlock( mutex_lock );
+		return false;
+	}
+
+	// Remove from buffer
+	__do_remove( chan, ret );
+	__set_avail_then_unlock( node, mutex_lock );
+	return true;
+}
+static inline bool unregister_select( chan_read(T) & this, select_node & node ) { return unregister_chan( this.chan, node ); }
+static inline bool on_selected( chan_read(T) & this, select_node & node ) with(this) {
+	if ( node.extra == 0p )	// check if woken up due to closed channel
+		__closed_remove( chan, ret );
+	// This is only reachable if not closed or closed exception was handled
+	return true;
+}
+
+// type used by select statement to capture a chan write as the selected operation
+struct chan_write {
+	channel(T) & chan;
+	T elem;
+};
+
+static inline void ?{}( chan_write(T) & cw, channel(T) & chan, T elem ) {
+	&cw.chan = &chan;
+	memcpy( (void *)&cw.elem, (void *)&elem, sizeof(T) );
+}
+static inline chan_write(T) ?>>?( T elem, channel(T) & chan ) { chan_write(T) cw{ chan, elem }; return cw; }
+
+static inline void __handle_select_closed_write( chan_write(T) & this, select_node & node ) with(this.chan, this) {
+	__closed_insert( chan, elem );
+	// if we get here then the insert succeeded
+	__make_select_node_available( node );
+}
+
+static inline bool register_select( chan_write(T) & this, select_node & node ) with(this.chan, this) {
+	lock( mutex_lock );
+	node.extra = &elem;	// set .extra so that if it == 0p later in on_selected it is due to channel close
+
+	#ifdef CHAN_STATS
+	if ( !closed ) operations++;
+	#endif
+
+	// check if we can complete operation. If so race to establish winner in special OR case
+	if ( !node.park_counter && ( count != size || !cons`isEmpty || unlikely(closed) ) ) {
+		if ( !__make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
+			unlock( mutex_lock );
+			return false;
+		}
+	}
+
+	// if closed handle
+	if ( unlikely(closed) ) {
+		unlock( mutex_lock );
+		__handle_select_closed_write( this, node );
+		return true;
+	}
+
+	// handle blocked consumer case via handoff (buffer is implicitly empty)
+	ConsEmpty: if ( !cons`isEmpty ) {
+		if ( !__handle_waituntil_OR( cons ) ) break ConsEmpty;
+		__cons_handoff( chan, elem );
+		__set_avail_then_unlock( node, mutex_lock );
+		return true;
+	}
+
+	// insert node in list if buffer is full, work will be completed by someone else
+	if ( count == size ) {
+		#ifdef CHAN_STATS
+		blocks++;
+		#endif
+
+		insert_last( prods, node );
+		unlock( mutex_lock );
+		return false;
+	} // if
+
+	// otherwise carry out write via normal insert
+	__buf_insert( chan, elem );
+	__set_avail_then_unlock( node, mutex_lock );
+	return true;
+}
+static inline bool unregister_select( chan_write(T) & this, select_node & node ) { return unregister_chan( this.chan, node ); }
+
+static inline bool on_selected( chan_write(T) & this, select_node & node ) with(this) {
+	if ( node.extra == 0p )	// check if woken up due to closed channel
+		__closed_insert( chan, elem );
+
+	// This is only reachable if not closed or closed exception was handled
+	return true;
+}
+
 } // forall( T )
libcfa/src/concurrency/future.hfa
r4daf79f → rc083c3d

 #include "monitor.hfa"
 #include "select.hfa"
+#include "locks.hfa"
...
 // future_t is lockfree and uses atomics which aren't needed given we use locks here
 forall( T ) {
-	// enum(int) { FUTURE_EMPTY = 0, FUTURE_FULFILLED = 1 };	// Enums seem to be broken so feel free to add this back afterwards
+	// enum { FUTURE_EMPTY = 0, FUTURE_FULFILLED = 1 };	// Enums seem to be broken so feel free to add this back afterwards

 	// temporary enum replacement
...
 	};

-	// C_TODO: perhaps allow exceptions to be inserted like uC++?
-
 	static inline {
...
 		void _internal_flush( future(T) & this ) with(this) {
 			while( ! waiters`isEmpty ) {
+				if ( !__handle_waituntil_OR( waiters ) )	// handle special waituntil OR case
+					break;	// if handle_OR returns false then waiters is empty so break
 				select_node &s = try_pop_front( waiters );

-				if ( s.race_flag == 0p )
+				if ( s.clause_status == 0p )
 					// poke in result so that woken threads do not need to reacquire any locks
-					// *(((future_node(T) &)s).my_result) = result;
 					copy_T( result, *(((future_node(T) &)s).my_result) );
-				else if ( !install_select_winner( s, &this ) ) continue;
+				else if ( !__make_select_node_available( s ) ) continue;

 				// only unpark if future is not selected
...
 		// Fulfil the future, returns whether or not someone was unblocked
-		bool fulfil( future(T) & this, T & val ) with(this) {
+		bool fulfil( future(T) & this, T val ) with(this) {
 			lock( lock );
 			if( state != FUTURE_EMPTY )
...
 		}

-		void * register_select( future(T) & this, select_node & s ) with(this) {
-			lock( lock );
-
-			// future not ready -> insert select node and return 0p
+		bool register_select( future(T) & this, select_node & s ) with(this) {
+			lock( lock );
+
+			// check if we can complete operation. If so race to establish winner in special OR case
+			if ( !s.park_counter && state != FUTURE_EMPTY ) {
+				if ( !__make_select_node_available( s ) ) {	// we didn't win the race so give up on registering
+					unlock( lock );
+					return false;
+				}
+			}
+
+			// future not ready -> insert select node and return
 			if( state == FUTURE_EMPTY ) {
 				insert_last( waiters, s );
 				unlock( lock );
-				return 0p;
-			}
-
-			// future ready and we won race to install it as the select winner return 1p
-			if ( install_select_winner( s, &this ) ) {
-				unlock( lock );
-				return 1p;
-			}
-
-			unlock( lock );
-			// future ready and we lost race to install it as the select winner
-			return 2p;
-		}
-
-		void unregister_select( future(T) & this, select_node & s ) with(this) {
+				return false;
+			}
+
+			__make_select_node_available( s );
+			unlock( lock );
+			return true;
+		}
+
+		bool unregister_select( future(T) & this, select_node & s ) with(this) {
+			if ( ! s`isListed ) return false;
 			lock( lock );
 			if ( s`isListed ) remove( s );
 			unlock( lock );
+			return false;
 		}

+		bool on_selected( future(T) & this, select_node & node ) { return true; }
 	}
 }
...
 // These futures below do not support select statements so they may not be as useful as 'future'
 // however the 'single_future' is cheap and cheerful and is most likely more performant than 'future'
-// since it uses raw atomics and no locks afaik
+// since it uses raw atomics and no locks
 //
 // As far as 'multi_future' goes I can't see many use cases as it will be less performant than 'future'
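Since register_select now follows the same bool-returning protocol as the other primitives, a future can appear directly in a waituntil alongside channels and locks. A small point-to-point sketch, assuming the future's get routine (not shown in this diff) blocks until fulfil stores a result:

	future( int ) f;

	thread Producer {};
	void main( Producer & ) {
		fulfil( f, 42 );	// store the result and wake any waiters
	}

	int main() {
		Producer p;		// producer thread starts here
		int res = get( f );	// blocks until the future is fulfilled
		sout | res;
	}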
libcfa/src/concurrency/invoke.h
r4daf79f → rc083c3d
@@ lines 217-225 @@
 	struct __thread_user_link cltr_link;

-	// used to point to this thd's current clh node
-	volatile bool * clh_node;
-
 	struct processor * last_proc;
+
+	// ptr used during handover between blocking lists to allow for stack allocation of intrusive nodes
+	// main use case is wait-morphing to allow a different node to be used to block on condvar vs lock
+	void * link_node;

 	PRNG_STATE_T random_state;	// fast random numbers
libcfa/src/concurrency/locks.cfa
r4daf79f → rc083c3d

 	// lock is held by some other thread
 	if ( owner != 0p && owner != thrd ) {
-		insert_last( blocked_threads, *thrd );
+		select_node node;
+		insert_last( blocked_threads, node );
 		wait_count++;
 		unlock( lock );
 		park( );
-	}
-	// multi acquisition lock is held by current thread
-	else if ( owner == thrd && multi_acquisition ) {
+		return;
+	} else if ( owner == thrd && multi_acquisition ) {	// multi acquisition lock is held by current thread
 		recursion_count++;
-		unlock( lock );
-	}
-	// lock isn't held
-	else {
+	} else {	// lock isn't held
 		owner = thrd;
 		recursion_count = 1;
-		unlock( lock );
-	}
+	}
+	unlock( lock );
 }
...
-static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
-	thread$ * t = &try_pop_front( blocked_threads );
-	owner = t;
-	recursion_count = ( t ? 1 : 0 );
-	if ( t ) wait_count--;
-	unpark( t );
+static inline void pop_node( blocking_lock & this ) with( this ) {
+	__handle_waituntil_OR( blocked_threads );
+	select_node * node = &try_pop_front( blocked_threads );
+	if ( node ) {
+		wait_count--;
+		owner = node->blocked_thread;
+		recursion_count = 1;
+		wake_one( blocked_threads, *node );
+	} else {
+		owner = 0p;
+		recursion_count = 0;
+	}
 }
...
 	recursion_count--;
 	if ( recursion_count == 0 ) {
-		pop_and_set_new_owner( this );
+		pop_node( this );
 	}
 	unlock( lock );
...
 	// lock held
 	if ( owner != 0p ) {
-		insert_last( blocked_threads, *t );
+		insert_last( blocked_threads, *(select_node *)t->link_node );
 		wait_count++;
-		unlock( lock );
 	}
 	// lock not held
 	else {
...
 		recursion_count = 1;
 		unpark( t );
-		unlock( lock );
-	}
+	}
+	unlock( lock );
 }
...
 	size_t ret = recursion_count;

-	pop_and_set_new_owner( this );
+	pop_node( this );
+
+	select_node node;
+	active_thread()->link_node = (void *)&node;
 	unlock( lock );
+
+	park();
+
 	return ret;
 }
...
 	recursion_count = recursion;
 }
+
+// waituntil() support
+bool register_select( blocking_lock & this, select_node & node ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	thread$ * thrd = active_thread();
+
+	// single acquisition lock is held by current thread
+	/* paranoid */ verifyf( owner != thrd || multi_acquisition, "Single acquisition lock holder (%p) attempted to reacquire the lock %p resulting in a deadlock.", owner, &this );
+
+	if ( !node.park_counter && ( (owner == thrd && multi_acquisition) || owner == 0p ) ) {	// OR special case
+		if ( !__make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
+			unlock( lock );
+			return false;
+		}
+	}
+
+	// lock is held by some other thread
+	if ( owner != 0p && owner != thrd ) {
+		insert_last( blocked_threads, node );
+		wait_count++;
+		unlock( lock );
+		return false;
+	} else if ( owner == thrd && multi_acquisition ) {	// multi acquisition lock is held by current thread
+		recursion_count++;
+	} else {	// lock isn't held
+		owner = thrd;
+		recursion_count = 1;
+	}
+
+	if ( node.park_counter ) __make_select_node_available( node );
+	unlock( lock );
+	return true;
+}
+
+bool unregister_select( blocking_lock & this, select_node & node ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	if ( node`isListed ) {
+		remove( node );
+		wait_count--;
+		unlock( lock );
+		return false;
+	}
+
+	if ( owner == active_thread() ) {
+		/* paranoid */ verifyf( recursion_count == 1 || multi_acquisition, "Thread %p attempted to unlock owner lock %p in waituntil unregister, which is not recursive but has a recursive count of %zu", active_thread(), &this, recursion_count );
+		// if recursion count is zero release lock and set new owner if one is waiting
+		recursion_count--;
+		if ( recursion_count == 0 ) {
+			pop_node( this );
+		}
+	}
+	unlock( lock );
+	return false;
+}
+
+bool on_selected( blocking_lock & this, select_node & node ) { return true; }
...
 int counter( condition_variable(L) & this ) with(this) { return count; }

-static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
+static void enqueue_thread( condition_variable(L) & this, info_thread(L) * i ) with(this) {
 	// add info_thread to waiting queue
 	insert_last( blocked_threads, *i );
 	count++;
-	size_t recursion_count = 0;
-	if (i->lock) {
-		// if lock was passed get recursion count to reset to after waking thread
-		recursion_count = on_wait( *i->lock );
-	}
-	return recursion_count;
+}
+
+static size_t block_and_get_recursion( info_thread(L) & i ) {
+	size_t recursion_count = 0;
+	if ( i.lock ) {
+		// if lock was passed get recursion count to reset to after waking thread
+		recursion_count = on_wait( *i.lock );	// this call blocks
+	} else park( );
+	return recursion_count;
 }

 // helper for wait()'s with no timeout
 static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
-	size_t recursion_count = queue_and_get_recursion(this, &i);
+	enqueue_thread( this, &i );
 	unlock( lock );

 	// blocks here
-	park( );
+	size_t recursion_count = block_and_get_recursion( i );

 	// resets recursion count here after waking
-	if (i.lock) on_wakeup(*i.lock, recursion_count);
+	if ( i.lock ) on_wakeup( *i.lock, recursion_count );
 }
...
 static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
-	size_t recursion_count = queue_and_get_recursion(this, &info);
+	enqueue_thread( this, &info );
 	alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
 	unlock( lock );
...
 	// blocks here
-	park();
+	size_t recursion_count = block_and_get_recursion( info );

 	// unregisters alarm so it doesn't go off if this happens first
...
 	// resets recursion count here after waking
-	if (info.lock) on_wakeup(*info.lock, recursion_count);
+	if ( info.lock ) on_wakeup( *info.lock, recursion_count );
 }
...
 	info_thread( L ) i = { active_thread(), info, &l };
 	insert_last( blocked_threads, i );
-	size_t recursion_count = on_wait( *i.lock );
-	park( );
+	size_t recursion_count = on_wait( *i.lock );	// blocks here
 	on_wakeup(*i.lock, recursion_count);
 }
...
 bool empty ( pthread_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }

-static size_t queue_and_get_recursion( pthread_cond_var(L) & this, info_thread(L) * i ) with(this) {
-	// add info_thread to waiting queue
-	insert_last( blocked_threads, *i );
-	size_t recursion_count = 0;
-	recursion_count = on_wait( *i->lock );
-	return recursion_count;
-}
-
 static void queue_info_thread_timeout( pthread_cond_var(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
-	size_t recursion_count = queue_and_get_recursion(this, &info);
+	insert_last( blocked_threads, info );
 	pthread_alarm_node_wrap(L) node_wrap = { t, 0`s, callback, &this, &info };
 	unlock( lock );
...
 	// blocks here
-	park();
+	size_t recursion_count = block_and_get_recursion( info );

 	// unregisters alarm so it doesn't go off if this happens first
...
 	// resets recursion count here after waking
-	if (info.lock) on_wakeup(*info.lock, recursion_count);
+	if ( info.lock ) on_wakeup( *info.lock, recursion_count );
 }
...
 	lock( lock __cfaabi_dbg_ctx2 );
 	info_thread( L ) i = { active_thread(), info, &l };
-	size_t recursion_count = queue_and_get_recursion(this, &i);
-	unlock( lock );
-	park( );
-	on_wakeup(*i.lock, recursion_count);
+	insert_last( blocked_threads, i );
+	unlock( lock );
+
+	// blocks here
+	size_t recursion_count = block_and_get_recursion( i );
+	on_wakeup( *i.lock, recursion_count );
 }
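The net effect of moving park() inside on_wait, and of passing a stack-allocated select_node through thread$.link_node, is wait-morphing: on_notify can requeue a signalled waiter directly onto the lock's blocked list instead of waking it only to block again on the lock. A usage sketch from both sides, assuming the standard condition_variable interface:

	single_acquisition_lock m;
	condition_variable( single_acquisition_lock ) cv;

	void waiter() {
		lock( m );
		wait( cv, m );		// enqueues, then on_wait releases m and blocks
		// on_wakeup has reacquired m here with the saved recursion count
		unlock( m );
	}

	void signaller() {
		lock( m );
		notify_one( cv );	// wait-morphing: waiter moves to m's blocked list, no spurious wakeup
		unlock( m );		// waiter becomes owner once m is released
	}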
libcfa/src/concurrency/locks.hfa
r4daf79f → rc083c3d

 #include "time.hfa"

+#include "select.hfa"
+
 #include <fstream.hfa>
...
 static inline void on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( single_acquisition_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( single_acquisition_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( single_acquisition_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
...
 static inline void on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
+static inline bool register_select( owner_lock & this, select_node & node ) { return register_select( (blocking_lock &)this, node ); }
+static inline bool unregister_select( owner_lock & this, select_node & node ) { return unregister_select( (blocking_lock &)this, node ); }
+static inline bool on_selected( owner_lock & this, select_node & node ) { return on_selected( (blocking_lock &)this, node ); }
...
(futex_mutex: whitespace-only signature cleanups, removal of a commented-out
"no contention try to acquire" fast path, and on_wait now parks:)
 static inline void on_notify( futex_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( futex_mutex & f ) { unlock(f); return 0; }
+static inline size_t on_wait( futex_mutex & f ) { unlock( f ); park(); return 0; }
...
(go_mutex: whitespace-only signature cleanups, and on_wait now parks:)
-static inline void on_notify( go_mutex & f, thread$ * t){ unpark(t); }
-static inline size_t on_wait( go_mutex & f ) { unlock(f); return 0; }
+static inline void on_notify( go_mutex & f, thread$ * t){ unpark( t ); }
+static inline size_t on_wait( go_mutex & f ) { unlock( f ); park(); return 0; }
 static inline void on_wakeup( go_mutex & f, size_t recursion ) {}
-
-//-----------------------------------------------------------------------------
-// CLH Spinlock
-// - No recursive acquisition
-// - Needs to be released by owner
-
-struct clh_lock {
-	volatile bool * volatile tail;
-	volatile bool * volatile head;
-};
-
-static inline void ?{}( clh_lock & this ) { this.tail = malloc(); *this.tail = true; }
-static inline void ^?{}( clh_lock & this ) { free(this.tail); }
-
-static inline void lock(clh_lock & l) {
-	thread$ * curr_thd = active_thread();
-	*(curr_thd->clh_node) = false;
-	volatile bool * prev = __atomic_exchange_n((bool **)(&l.tail), (bool *)(curr_thd->clh_node), __ATOMIC_SEQ_CST);
-	while(!__atomic_load_n(prev, __ATOMIC_SEQ_CST)) Pause();
-	__atomic_store_n((bool **)(&l.head), (bool *)curr_thd->clh_node, __ATOMIC_SEQ_CST);
-	curr_thd->clh_node = prev;
-}
-
-static inline void unlock(clh_lock & l) {
-	__atomic_store_n((bool *)(l.head), true, __ATOMIC_SEQ_CST);
-}
-
-static inline void on_notify(clh_lock & this, struct thread$ * t ) { unpark(t); }
-static inline size_t on_wait(clh_lock & this) { unlock(this); return 0; }
-static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }
...
(exp_backoff_then_block_lock and fast_block_lock: whitespace-only signature
cleanups, and both on_wait routines now park:)
-static inline size_t on_wait( exp_backoff_then_block_lock & this) { unlock(this); return 0; }
+static inline size_t on_wait( exp_backoff_then_block_lock & this ) { unlock( this ); park(); return 0; }
...
-static inline size_t on_wait( fast_block_lock & this) { unlock(this); return 0; }
+static inline size_t on_wait( fast_block_lock & this) { unlock(this); park(); return 0; }
...
 struct simple_owner_lock {
 	// List of blocked threads
-	dlist( thread$ ) blocked_threads;
+	dlist( select_node ) blocked_threads;

 	// Spin lock used for mutual exclusion
...
 	lock( lock __cfaabi_dbg_ctx2 );

-	if (owner != 0p) {
-		insert_last( blocked_threads, *active_thread() );
+	if ( owner != 0p ) {
+		select_node node;
+		insert_last( blocked_threads, node );
 		unlock( lock );
 		park( );
...
 }

-// TODO: fix duplicate def issue and bring this back
-// void pop_and_set_new_owner( simple_owner_lock & this ) with( this ) {
-//	thread$ * t = &try_pop_front( blocked_threads );
-//	owner = t;
-//	recursion_count = ( t ? 1 : 0 );
-//	unpark( t );
-// }
-
-static inline void unlock(simple_owner_lock & this) with(this) {
+static inline void pop_node( simple_owner_lock & this ) with(this) {
+	__handle_waituntil_OR( blocked_threads );
+	select_node * node = &try_pop_front( blocked_threads );
+	if ( node ) {
+		owner = node->blocked_thread;
+		recursion_count = 1;
+		wake_one( blocked_threads, *node );
+	} else {
+		owner = 0p;
+		recursion_count = 0;
+	}
+}
+
+static inline void unlock( simple_owner_lock & this ) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	/* paranoid */ verifyf( owner != 0p, "Attempt to release lock %p that isn't held", &this );
...
 	recursion_count--;
 	if ( recursion_count == 0 ) {
-		// pop_and_set_new_owner( this );
-		thread$ * t = &try_pop_front( blocked_threads );
-		owner = t;
-		recursion_count = ( t ? 1 : 0 );
-		unpark( t );
+		pop_node( this );
 	}
 	unlock( lock );
 }

-static inline void on_notify(simple_owner_lock & this, struct thread$ * t ) with(this) {
+static inline void on_notify( simple_owner_lock & this, thread$ * t ) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
 	// lock held
 	if ( owner != 0p ) {
-		insert_last( blocked_threads, *t );
+		insert_last( blocked_threads, *(select_node *)t->link_node );
 	}
 	// lock not held
...
 	size_t ret = recursion_count;

-	// pop_and_set_new_owner( this );
-
-	thread$ * t = &try_pop_front( blocked_threads );
-	owner = t;
-	recursion_count = ( t ? 1 : 0 );
-	unpark( t );
-
+	pop_node( this );
+
+	select_node node;
+	active_thread()->link_node = (void *)&node;
 	unlock( lock );
+	park();
+
 	return ret;
 }

-static inline void on_wakeup(simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+static inline void on_wakeup( simple_owner_lock & this, size_t recursion ) with(this) { recursion_count = recursion; }
+
+// waituntil() support
+static inline bool register_select( simple_owner_lock & this, select_node & node ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+
+	// check if we can complete operation. If so race to establish winner in special OR case
+	if ( !node.park_counter && ( owner == active_thread() || owner == 0p ) ) {
+		if ( !__make_select_node_available( node ) ) {	// we didn't win the race so give up on registering
+			unlock( lock );
+			return false;
+		}
+	}
+
+	if ( owner == active_thread() ) {
+		recursion_count++;
+		if ( node.park_counter ) __make_select_node_available( node );
+		unlock( lock );
+		return true;
+	}
+
+	if ( owner != 0p ) {
+		insert_last( blocked_threads, node );
+		unlock( lock );
+		return false;
+	}
+
+	owner = active_thread();
+	recursion_count = 1;
+
+	if ( node.park_counter ) __make_select_node_available( node );
+	unlock( lock );
+	return true;
+}
+
+static inline bool unregister_select( simple_owner_lock & this, select_node & node ) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	if ( node`isListed ) {
+		remove( node );
+		unlock( lock );
+		return false;
+	}
+
+	if ( owner == active_thread() ) {
+		recursion_count--;
+		if ( recursion_count == 0 ) {
+			pop_node( this );
+		}
+	}
+	unlock( lock );
+	return false;
+}
+
+static inline bool on_selected( simple_owner_lock & this, select_node & node ) { return true; }
...
(spin_queue_lock, mcs_block_spin_lock and block_spin_lock: whitespace-only
signature cleanups, and their on_wait routines now park:)
-static inline size_t on_wait( spin_queue_lock & this) { unlock(this); return 0; }
+static inline size_t on_wait( spin_queue_lock & this ) { unlock( this ); park(); return 0; }
-static inline size_t on_wait( mcs_block_spin_lock & this) { unlock(this); return 0; }
+static inline size_t on_wait( mcs_block_spin_lock & this) { unlock( this ); park(); return 0; }
-static inline size_t on_wait( block_spin_lock & this) { unlock(this); return 0; }
+static inline size_t on_wait( block_spin_lock & this ) { unlock( this ); park(); return 0; }
...
 forall(L & | is_blocking_lock(L)) {
 	struct info_thread;
-
-	// // for use by sequence
-	// info_thread(L) *& Back( info_thread(L) * this );
-	// info_thread(L) *& Next( info_thread(L) * this );
 }
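Most of the simple locks above now share one contract with the condition variables: on_wait releases the lock and blocks the caller (park() moved inside), on_notify unblocks a thread, and on_wakeup restores lock state after the signal. A minimal sketch of a user-defined lock obeying that contract; the wrapper type and its name are hypothetical:

	// Hypothetical wrapper lock satisfying the blocking-lock contract.
	struct my_lock { fast_block_lock l; };

	static inline void lock( my_lock & this ) { lock( this.l ); }
	static inline void unlock( my_lock & this ) { unlock( this.l ); }
	static inline void on_notify( my_lock & this, struct thread$ * t ) { unpark( t ); }
	static inline size_t on_wait( my_lock & this ) { unlock( this ); park(); return 0; }	// release, then block
	static inline void on_wakeup( my_lock & this, size_t recursion ) { lock( this ); }	// reacquire after signal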
libcfa/src/concurrency/mutex_stmt.hfa
r4daf79f rc083c3d 15 15 }; 16 16 17 18 17 struct __mutex_stmt_lock_guard { 19 18 void ** lockarr; … … 30 29 31 30 forall(L & | is_lock(L)) { 32 33 struct scoped_lock { 34 L * internal_lock; 35 }; 36 37 static inline void ?{}( scoped_lock(L) & this, L & internal_lock ) { 38 this.internal_lock = &internal_lock; 39 lock(internal_lock); 40 } 41 42 static inline void ^?{}( scoped_lock(L) & this ) with(this) { 43 unlock(*internal_lock); 44 } 45 46 static inline void * __get_mutexstmt_lock_ptr( L & this ) { 47 return &this; 48 } 49 50 static inline L __get_mutexstmt_lock_type( L & this ); 51 52 static inline L __get_mutexstmt_lock_type( L * this ); 31 static inline void * __get_mutexstmt_lock_ptr( L & this ) { return &this; } 32 static inline L __get_mutexstmt_lock_type( L & this ) {} 33 static inline L __get_mutexstmt_lock_type( L * this ) {} 53 34 } -
libcfa/src/concurrency/select.hfa
r4daf79f rc083c3d 2 2 3 3 #include "containers/list.hfa" 4 #include <stdint.h> 5 #include <kernel.hfa> 6 #include <locks.hfa> 4 #include "stdint.h" 5 #include "kernel.hfa" 7 6 7 struct select_node; 8 9 // node status 10 static const unsigned long int __SELECT_UNSAT = 0; 11 static const unsigned long int __SELECT_SAT = 1; 12 static const unsigned long int __SELECT_RUN = 2; 13 14 static inline bool __CFA_has_clause_run( unsigned long int status ) { return status == __SELECT_RUN; } 15 static inline void __CFA_maybe_park( int * park_counter ) { 16 if ( __atomic_sub_fetch( park_counter, 1, __ATOMIC_SEQ_CST) < 0 ) 17 park(); 18 } 19 20 // node used for coordinating waituntil synchronization 8 21 struct select_node { 22 int * park_counter; // If this is 0p then the node is in a special OR case waituntil 23 unsigned long int * clause_status; // needs to point at a ptr-sized location; if this is 0p then the node is not part of a waituntil 24 25 void * extra; // used to store arbitrary data needed by some primitives 26 9 27 thread$ * blocked_thread; 10 void ** race_flag; 11 28 inline dlink(select_node); 12 29 }; 13 30 P9_EMBEDDED( select_node, dlink(select_node) ) 14 31 15 void ?{}( select_node & this ) { 16 this.blocked_thread = 0p; 17 this.race_flag = 0p; 32 static inline void ?{}( select_node & this ) { 33 this.blocked_thread = active_thread(); 34 this.clause_status = 0p; 35 this.park_counter = 0p; 36 this.extra = 0p; 18 37 } 19 38 20 void ?{}( select_node & this, thread$ * blocked_thread ) { 39 static inline void ?{}( select_node & this, thread$ * blocked_thread ) { 21 40 this.blocked_thread = blocked_thread; 22 this.race_flag = 0p; 41 this.clause_status = 0p; 42 this.park_counter = 0p; 43 this.extra = 0p; 23 44 } 24 45 25 void ?{}( select_node & this, thread$ * blocked_thread, void ** race_flag) { 46 static inline void ?{}( select_node & this, thread$ * blocked_thread, void * extra ) { 26 47 this.blocked_thread = blocked_thread; 27 this.race_flag = race_flag; 48 this.clause_status = 0p; 49 this.park_counter = 0p; 50 this.extra = extra; 28 51 } 29 52 30 void ^?{}( select_node & this ) {} 53 static inline void ^?{}( select_node & this ) {} 31 54 55 static inline unsigned long int * __get_clause_status( select_node & s ) { return s.clause_status; } 32 56 33 57 //----------------------------------------------------------------------------- 34 58 // is_selectable 35 trait is_selectable(T & | sized(T)) { 36 // For registering a select on a selectable concurrency primitive 37 // return 0p if primitive not accessible yet 38 // return 1p if primitive gets acquired 39 // return 2p if primitive is accessible but some other primitive won the race 40 // C_TODO: add enum for return values 41 void * register_select( T &, select_node & ); 59 forall(T & | sized(T)) 60 trait is_selectable { 61 // For registering a select stmt on a selectable concurrency primitive 62 // Returns a bool indicating whether the operation is already SAT 63 bool register_select( T &, select_node & ); 42 64 43 void unregister_select( T &, select_node & ); 65 // For unregistering a select stmt on a selectable concurrency primitive 66 // If true is returned then the corresponding code block is run (only in the non-special OR case and only if node status is not RUN) 67 bool unregister_select( T &, select_node & ); 68 69 // This routine is run on the selecting thread prior to executing the statement corresponding to the select_node 70 // passed as an arg to this routine 71 // If on_selected returns false, the statement is not run; if it returns true, it is run. 
72 bool on_selected( T &, select_node & ); 44 73 }; 45 74 46 static inline bool install_select_winner( select_node & this, void * primitive_ptr ) with(this) { 47 // temporary needed for atomic instruction 48 void * cmp_flag = 0p; 49 50 // if we don't win the selector race we need to potentially 51 // ignore this node and move to the next one so we return accordingly 52 if ( *race_flag != 0p || 53 !__atomic_compare_exchange_n( 54 race_flag, 55 &cmp_flag, 56 primitive_ptr, 57 false, 58 __ATOMIC_SEQ_CST, 59 __ATOMIC_SEQ_CST 60 ) 61 ) return false; // lost race and some other node triggered select 62 return true; // won race so this node is what the select proceeds with 75 // this is used inside the compiler to attempt to establish an else clause as a winner in the OR special case race 76 static inline bool __select_node_else_race( select_node & this ) with( this ) { 77 unsigned long int cmp_status = __SELECT_UNSAT; 78 return *clause_status == 0 79 && __atomic_compare_exchange_n( clause_status, &cmp_status, 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ); 63 80 } 81 82 // when a primitive becomes available it calls the following routine on its node to update the select state: 83 // return true if we want to unpark the thread 84 static inline bool __make_select_node_available( select_node & this ) with( this ) { 85 unsigned long int cmp_status = __SELECT_UNSAT; 86 87 if( !park_counter ) 88 return *clause_status == 0 89 && __atomic_compare_exchange_n( clause_status, &cmp_status, (unsigned long int)&this, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ); // OR-specific case where race was won 90 91 return *clause_status == 0 92 && __atomic_compare_exchange_n( clause_status, &cmp_status, __SELECT_SAT, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) // can maybe just use atomic write 93 && !__atomic_add_fetch( park_counter, 1, __ATOMIC_SEQ_CST); 94 } 95 96 // Handles the special OR case of the waituntil statement 97 // Since only one select node can win in the OR case, we need to race to set the node available BEFORE 98 // performing the operation, since if we lose the race the operation must not be performed, as it would be lost 99 // Returns true if execution can continue normally and false if the queue has now been drained 100 static inline bool __handle_waituntil_OR( dlist( select_node ) & queue ) { 101 if ( queue`isEmpty ) return false; 102 if ( queue`first.clause_status && !queue`first.park_counter ) { 103 while ( !queue`isEmpty ) { 104 // if the node is not a special OR case, or we win the special OR race, break 105 if ( !queue`first.clause_status || queue`first.park_counter || __make_select_node_available( queue`first ) ) { return true; } 106 // otherwise we lost the special OR race so discard the node 107 try_pop_front( queue ); 108 } 109 return false; 110 } 111 return true; 112 } 113 114 // wake one thread from the list 115 static inline void wake_one( dlist( select_node ) & queue, select_node & popped ) { 116 if ( !popped.clause_status // normal case, node is not a select node 117 || ( popped.clause_status && !popped.park_counter ) // if the popped link is the special OR case, unpark but don't call __make_select_node_available 118 || __make_select_node_available( popped ) ) // check if the popped link belongs to a selecting thread 119 unpark( popped.blocked_thread ); 120 } 121 122 static inline void wake_one( dlist( select_node ) & queue ) { wake_one( queue, try_pop_front( queue ) ); } 123 124 static inline void setup_clause( select_node & this, unsigned long int * clause_status, int * park_counter ) { 125 
this.blocked_thread = active_thread(); 126 this.clause_status = clause_status; 127 this.park_counter = park_counter; 128 } 129 -
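Taken together, __CFA_maybe_park and __make_select_node_available implement a counter handshake: the selecting thread decrements park_counter before blocking and a satisfying primitive increments it, so exactly one side observes the counter crossing zero and takes responsibility for the park/unpark pairing. A rough C++ analogue of the two sides, inferred from the code above (park/unpark themselves are CFA runtime calls and are omitted):

#include <atomic>

// Selecting-thread side, mirroring __CFA_maybe_park: a negative result
// after the decrement means no clause is satisfied yet, so block.
bool should_park( std::atomic<int> & park_counter ) {
    return park_counter.fetch_sub( 1 ) - 1 < 0;
}

// Satisfying side, mirroring the tail of __make_select_node_available:
// a zero result after the increment means the waiter already decremented,
// so this thread must perform the unpark.
bool should_unpark( std::atomic<int> & park_counter ) {
    return park_counter.fetch_add( 1 ) + 1 == 0;
}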
libcfa/src/concurrency/thread.cfa
r4daf79f rc083c3d 53 53 preferred = ready_queue_new_preferred(); 54 54 last_proc = 0p; 55 link_node = 0p; 55 56 PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() ); 56 57 #if defined( __CFA_WITH_VERIFY__ ) … … 59 60 #endif 60 61 61 clh_node = malloc( );62 *clh_node = false;63 64 62 doregister(curr_cluster, this); 65 63 monitors{ &self_mon_p, 1, (fptr_t)0 }; … … 70 68 canary = 0xDEADDEADDEADDEADp; 71 69 #endif 72 free(clh_node);73 70 unregister(curr_cluster, this); 74 71 ^self_cor{}; -
src/AST/Convert.cpp
r4daf79f rc083c3d 567 567 } 568 568 569 const ast::WhenClause * visit( const ast::WhenClause * node ) override final { 570 // There is no old-AST WhenClause, so this should never be called. 571 assert( !node ); 572 return nullptr; 573 } 574 569 575 const ast::Stmt * visit( const ast::WaitForStmt * node ) override final { 570 576 if ( inCache( node ) ) return nullptr; … … 573 579 for ( auto clause : node->clauses ) { 574 580 stmt->clauses.push_back({{ 575 get<Expression>().accept1( clause->target _func),581 get<Expression>().accept1( clause->target ), 576 582 get<Expression>().acceptL( clause->target_args ), 577 583 }, 578 584 get<Statement>().accept1( clause->stmt ), 579 get<Expression>().accept1( clause-> cond ),585 get<Expression>().accept1( clause->when_cond ), 580 586 }); 581 587 } … … 594 600 const ast::WaitForClause * visit( const ast::WaitForClause * node ) override final { 595 601 // There is no old-AST WaitForClause, so this should never be called. 602 assert( !node ); 603 return nullptr; 604 } 605 606 const ast::Stmt * visit( const ast::WaitUntilStmt * node ) override final { 607 // There is no old-AST WaitUntilStmt, so this should never be called. 596 608 assert( !node ); 597 609 return nullptr; … … 2158 2170 auto clause = new ast::WaitForClause( old->location ); 2159 2171 2160 clause->target _func= GET_ACCEPT_1(clauses[i].target.function, Expr);2172 clause->target = GET_ACCEPT_1(clauses[i].target.function, Expr); 2161 2173 clause->target_args = GET_ACCEPT_V(clauses[i].target.arguments, Expr); 2162 2174 clause->stmt = GET_ACCEPT_1(clauses[i].statement, Stmt); 2163 clause-> cond = GET_ACCEPT_1(clauses[i].condition, Expr);2175 clause->when_cond = GET_ACCEPT_1(clauses[i].condition, Expr); 2164 2176 2165 2177 stmt->clauses.push_back( clause ); -
src/AST/Fwd.hpp
r4daf79f rc083c3d 58 58 class FinallyClause; 59 59 class SuspendStmt; 60 class WhenClause; 60 61 class WaitForStmt; 61 62 class WaitForClause; 63 class WaitUntilStmt; 62 64 class WithStmt; 63 65 class DeclStmt; -
src/AST/Node.cpp
r4daf79f rc083c3d 174 174 template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::weak >; 175 175 template class ast::ptr_base< ast::FinallyClause, ast::Node::ref_type::strong >; 176 template class ast::ptr_base< ast::WhenClause, ast::Node::ref_type::weak >; 177 template class ast::ptr_base< ast::WhenClause, ast::Node::ref_type::strong >; 176 178 template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::weak >; 177 179 template class ast::ptr_base< ast::WaitForStmt, ast::Node::ref_type::strong >; 178 180 template class ast::ptr_base< ast::WaitForClause, ast::Node::ref_type::weak >; 179 181 template class ast::ptr_base< ast::WaitForClause, ast::Node::ref_type::strong >; 182 template class ast::ptr_base< ast::WaitUntilStmt, ast::Node::ref_type::weak >; 183 template class ast::ptr_base< ast::WaitUntilStmt, ast::Node::ref_type::strong >; 180 184 template class ast::ptr_base< ast::WithStmt, ast::Node::ref_type::weak >; 181 185 template class ast::ptr_base< ast::WithStmt, ast::Node::ref_type::strong >; -
src/AST/Pass.hpp
r4daf79f rc083c3d 162 162 const ast::FinallyClause * visit( const ast::FinallyClause * ) override final; 163 163 const ast::Stmt * visit( const ast::SuspendStmt * ) override final; 164 const ast::WhenClause * visit( const ast::WhenClause * ) override final; 164 165 const ast::Stmt * visit( const ast::WaitForStmt * ) override final; 165 166 const ast::WaitForClause * visit( const ast::WaitForClause * ) override final; 167 const ast::Stmt * visit( const ast::WaitUntilStmt * ) override final; 166 168 const ast::Decl * visit( const ast::WithStmt * ) override final; 167 169 const ast::NullStmt * visit( const ast::NullStmt * ) override final; -
src/AST/Pass.impl.hpp
r4daf79f rc083c3d 1066 1066 1067 1067 //-------------------------------------------------------------------------- 1068 // WhenClause 1069 template< typename core_t > 1070 const ast::WhenClause * ast::Pass< core_t >::visit( const ast::WhenClause * node ) { 1071 VISIT_START( node ); 1072 1073 if ( __visit_children() ) { 1074 maybe_accept( node, &WhenClause::target ); 1075 maybe_accept( node, &WhenClause::stmt ); 1076 maybe_accept( node, &WhenClause::when_cond ); 1077 } 1078 1079 VISIT_END( WhenClause, node ); 1080 } 1081 1082 //-------------------------------------------------------------------------- 1068 1083 // WaitForStmt 1069 1084 template< typename core_t > … … 1090 1105 1091 1106 if ( __visit_children() ) { 1092 maybe_accept( node, &WaitForClause::target _func);1107 maybe_accept( node, &WaitForClause::target ); 1093 1108 maybe_accept( node, &WaitForClause::target_args ); 1094 1109 maybe_accept( node, &WaitForClause::stmt ); 1095 maybe_accept( node, &WaitForClause:: cond );1110 maybe_accept( node, &WaitForClause::when_cond ); 1096 1111 } 1097 1112 1098 1113 VISIT_END( WaitForClause, node ); 1114 } 1115 1116 //-------------------------------------------------------------------------- 1117 // WaitUntilStmt 1118 template< typename core_t > 1119 const ast::Stmt * ast::Pass< core_t >::visit( const ast::WaitUntilStmt * node ) { 1120 VISIT_START( node ); 1121 1122 if ( __visit_children() ) { 1123 maybe_accept( node, &WaitUntilStmt::clauses ); 1124 maybe_accept( node, &WaitUntilStmt::timeout_time ); 1125 maybe_accept( node, &WaitUntilStmt::timeout_stmt ); 1126 maybe_accept( node, &WaitUntilStmt::timeout_cond ); 1127 maybe_accept( node, &WaitUntilStmt::else_stmt ); 1128 maybe_accept( node, &WaitUntilStmt::else_cond ); 1129 } 1130 1131 VISIT_END( Stmt, node ); 1099 1132 } 1100 1133 -
src/AST/Print.cpp
r4daf79f rc083c3d 208 208 } 209 209 210 void print( const ast::WaitStmt * node ) { 211 if ( node->timeout_time ) { 212 os << indent-1 << "timeout of:" << endl; 213 node->timeout_time->accept( *this ); 214 215 if ( node->timeout_stmt ) { 216 os << indent-1 << "... with statement:" << endl; 217 node->timeout_stmt->accept( *this ); 218 } 219 220 if ( node->timeout_cond ) { 221 os << indent-1 << "... with condition:" << endl; 222 node->timeout_cond->accept( *this ); 223 } 224 } 225 226 if ( node->else_stmt ) { 227 os << indent-1 << "else:" << endl; 228 node->else_stmt->accept( *this ); 229 230 if ( node->else_cond ) { 231 os << indent-1 << "... with condition:" << endl; 232 node->else_cond->accept( *this ); 233 } 234 } 235 } 236 210 237 void preprint( const ast::NamedTypeDecl * node ) { 211 238 if ( ! node->name.empty() ) { … … 756 783 } 757 784 785 virtual const ast::WhenClause * visit( const ast::WhenClause * node ) override final { 786 os << indent-1 << "target: "; 787 safe_print( node->target ); 788 789 if ( node->stmt ) { 790 os << indent-1 << "... with statement:" << endl; 791 node->stmt->accept( *this ); 792 } 793 794 if ( node->when_cond ) { 795 os << indent-1 << "... with when condition:" << endl; 796 node->when_cond->accept( *this ); 797 } 798 799 return node; 800 } 801 758 802 virtual const ast::Stmt * visit( const ast::WaitForStmt * node ) override final { 759 803 os << "Waitfor Statement" << endl; … … 793 837 virtual const ast::WaitForClause * visit( const ast::WaitForClause * node ) override final { 794 838 os << indent-1 << "target function: "; 795 safe_print( node->target _func); 839 safe_print( node->target ); 796 840 797 841 if ( !node->target_args.empty() ) { … … 807 851 } 808 852 809 if ( node-> cond ) {853 if ( node->when_cond ) { 810 854 os << indent-1 << "... with condition:" << endl; 811 node->cond->accept( *this ); 812 813 855 node->when_cond->accept( *this ); 856 857 858 return node; 859 860 861 virtual const ast::Stmt * visit( const ast::WaitUntilStmt * node ) override final { 862 os << "Waituntil Statement" << endl; 863 indent += 2; 864 for( const auto & clause : node->clauses ) { 865 clause->accept( *this ); 866 } 867 print(node); // calls print( const ast::WaitStmt * node ) 814 868 return node; 815 869 }
src/AST/Stmt.hpp
r4daf79f rc083c3d 378 378 }; 379 379 380 // Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ...381 class WaitForStmt final : public Stmt { 382 public: 383 std::vector<ptr<WaitForClause>> clauses; 384 380 // Base class of WaitFor/WaitUntil statements 381 // form: KEYWORD(...) ... timeout(...) ... else ... 382 class WaitStmt : public Stmt { 383 public: 384 ptr<Expr> timeout_time; 385 385 ptr<Stmt> timeout_stmt; 386 386 ptr<Expr> timeout_cond; … … 388 388 ptr<Expr> else_cond; 389 389 390 WaitStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 391 : Stmt(loc, std::move(labels)) {} 392 393 private: 394 WaitStmt * clone() const override = 0; 395 MUTATE_FRIEND 396 }; 397 398 // Base class for WaitFor/WaitUntil clauses 399 // form: when( when_cond ) KEYWORD( target ) stmt 400 class WhenClause : public StmtClause { 401 public: 402 ptr<Expr> target; 403 ptr<Stmt> stmt; 404 ptr<Expr> when_cond; 405 406 WhenClause( const CodeLocation & loc ) 407 : StmtClause( loc ) {} 408 409 const WhenClause * accept( Visitor & v ) const override { return v.visit( this ); } 410 private: 411 WhenClause * clone() const override { return new WhenClause{ *this }; } 412 MUTATE_FRIEND 413 }; 414 415 // Waitfor statement: when (...) waitfor (... , ...) ... timeout(...) ... else ... 416 class WaitForStmt final : public WaitStmt { 417 public: 418 std::vector<ptr<WaitForClause>> clauses; 419 390 420 WaitForStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 391 : Stmt(loc, std::move(labels)) {}421 : WaitStmt(loc, std::move(labels)) {} 392 422 393 423 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } … … 398 428 399 429 // Clause in a waitfor statement: waitfor (..., ...) ... 400 class WaitForClause final : public StmtClause { 401 public: 402 ptr<Expr> target_func; 430 class WaitForClause final : public WhenClause { 431 public: 403 432 std::vector<ptr<Expr>> target_args; 404 ptr<Stmt> stmt;405 ptr<Expr> cond;406 433 407 434 WaitForClause( const CodeLocation & loc ) 408 : StmtClause( loc ) {}435 : WhenClause( loc ) {} 409 436 410 437 const WaitForClause * accept( Visitor & v ) const override { return v.visit( this ); } 411 438 private: 412 439 WaitForClause * clone() const override { return new WaitForClause{ *this }; } 440 MUTATE_FRIEND 441 }; 442 443 // waituntil statement: when (...) waituntil (...) ... timeout(...) ... else ... 
444 class WaitUntilStmt final : public WaitStmt { 445 public: 446 // Non-ast node used during compilation to store data needed to generate predicates 447 // and set initial status values for clauses 448 // Used to create a tree corresponding to the structure of the clauses in a WaitUntil 449 struct ClauseNode { 450 enum Op { AND, OR, LEFT_OR, LEAF, ELSE, TIMEOUT } op; // operation/type tag 451 // LEFT_OR used with TIMEOUT/ELSE to indicate that we ignore right hand side after parsing 452 453 ClauseNode * left; 454 ClauseNode * right; 455 WhenClause * leaf; // only set if this node is a leaf (points into vector of clauses) 456 457 bool ambiguousWhen; // used to paint nodes of predicate tree based on when() clauses 458 bool whenState; // used to track if when_cond is toggled on or off for generating init values 459 bool childOfAnd; // true on leaf nodes that are children of AND, false otherwise 460 461 ClauseNode( Op op, ClauseNode * left, ClauseNode * right ) 462 : op(op), left(left), right(right), leaf(nullptr), 463 ambiguousWhen(false), whenState(true), childOfAnd(false) {} 464 ClauseNode( Op op, WhenClause * leaf ) 465 : op(op), left(nullptr), right(nullptr), leaf(leaf), 466 ambiguousWhen(false), whenState(true), childOfAnd(false) {} 467 ClauseNode( WhenClause * leaf ) : ClauseNode(LEAF, leaf) {} 468 469 ~ClauseNode() { 470 if ( left ) delete left; 471 if ( right ) delete right; 472 } 473 }; 474 475 std::vector<ptr<WhenClause>> clauses; 476 ClauseNode * predicateTree; 477 478 WaitUntilStmt( const CodeLocation & loc, const std::vector<Label> && labels = {} ) 479 : WaitStmt(loc, std::move(labels)) {} 480 481 ~WaitUntilStmt() { delete predicateTree; } 482 483 const Stmt * accept( Visitor & v ) const override { return v.visit( this ); } 484 private: 485 WaitUntilStmt * clone() const override { return new WaitUntilStmt{ *this }; } 413 486 MUTATE_FRIEND 414 487 }; -
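The ClauseNode tree never survives past compilation: the waituntil code generator walks it to emit a predicate over the per-clause status flags, which decides when the blocked thread may proceed. As a toy illustration of how such a predicate tree evaluates, here is a hypothetical, much-reduced C++ analogue (the real ClauseNode carries when-clause and codegen bookkeeping this sketch omits):

#include <cassert>

// Leaves stand for waituntil clauses; internal nodes combine their
// satisfaction with AND/OR, like the predicate generated from
// WaitUntilStmt::ClauseNode.
struct Node {
    enum Op { AND, OR, LEAF } op;
    Node * left;
    Node * right;
    bool sat; // leaf-only: has this clause been satisfied?

    bool eval() const {
        if ( op == LEAF ) return sat;
        bool l = left->eval(), r = right->eval();
        return op == AND ? ( l && r ) : ( l || r );
    }
};

int main() {
    Node a{ Node::LEAF, nullptr, nullptr, false };
    Node b{ Node::LEAF, nullptr, nullptr, false };
    Node c{ Node::LEAF, nullptr, nullptr, false };
    Node bc{ Node::OR, &b, &c, false };
    Node root{ Node::AND, &a, &bc, false }; // A and (B or C)
    a.sat = true; c.sat = true;             // clauses A and C fire
    assert( root.eval() );                  // the waituntil may proceed
    return 0;
}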
src/AST/Visitor.hpp
r4daf79f rc083c3d 50 50 virtual const ast::FinallyClause * visit( const ast::FinallyClause * ) = 0; 51 51 virtual const ast::Stmt * visit( const ast::SuspendStmt * ) = 0; 52 virtual const ast::WhenClause * visit( const ast::WhenClause * ) = 0; 52 53 virtual const ast::Stmt * visit( const ast::WaitForStmt * ) = 0; 53 54 virtual const ast::WaitForClause * visit( const ast::WaitForClause * ) = 0; 55 virtual const ast::Stmt * visit( const ast::WaitUntilStmt * ) = 0; 54 56 virtual const ast::Decl * visit( const ast::WithStmt * ) = 0; 55 57 virtual const ast::NullStmt * visit( const ast::NullStmt * ) = 0; -
src/Common/CodeLocationTools.cpp
r4daf79f rc083c3d 128 128 macro(FinallyClause, FinallyClause) \ 129 129 macro(SuspendStmt, Stmt) \ 130 macro(WhenClause, WhenClause) \ 130 131 macro(WaitForStmt, Stmt) \ 131 132 macro(WaitForClause, WaitForClause) \ 133 macro(WaitUntilStmt, Stmt) \ 132 134 macro(WithStmt, Decl) \ 133 135 macro(NullStmt, NullStmt) \ -
src/Concurrency/WaitforNew.cpp
r4daf79f rc083c3d 305 305 306 306 const ast::VariableExpr * variableExpr = 307 clause->target _func.as<ast::VariableExpr>();307 clause->target.as<ast::VariableExpr>(); 308 308 ast::Expr * castExpr = new ast::CastExpr( 309 309 location, 310 310 new ast::CastExpr( 311 311 location, 312 clause->target _func,312 clause->target, 313 313 ast::deepCopy( variableExpr->result.get() ), 314 314 ast::GeneratedCast ), … … 325 325 326 326 ResolveContext context{ symtab, transUnit().global }; 327 out->push_back( maybeCond( location, clause-> cond.get(), {327 out->push_back( maybeCond( location, clause->when_cond.get(), { 328 328 makeAccStmt( location, acceptables, index, "is_dtor", 329 detectIsDtor( location, clause->target _func), context ),329 detectIsDtor( location, clause->target ), context ), 330 330 makeAccStmt( location, acceptables, index, "func", 331 331 funcExpr, context ), -
src/Concurrency/module.mk
r4daf79f rc083c3d 23 23 Concurrency/WaitforNew.cpp \ 24 24 Concurrency/Waitfor.cc \ 25 Concurrency/Waitfor.h 25 Concurrency/Waitfor.h \ 26 Concurrency/Waituntil.cpp \ 27 Concurrency/Waituntil.hpp -
src/Parser/StatementNode.cc
r4daf79f rc083c3d 328 328 ast::WaitForStmt * build_waitfor( const CodeLocation & location, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt ) { 329 329 auto clause = new ast::WaitForClause( location ); 330 clause->target _func= maybeBuild( targetExpr );330 clause->target = maybeBuild( targetExpr ); 331 331 clause->stmt = maybeMoveBuild( stmt ); 332 clause-> cond = notZeroExpr( maybeMoveBuild( when ) );332 clause->when_cond = notZeroExpr( maybeMoveBuild( when ) ); 333 333 334 334 ExpressionNode * next = dynamic_cast<ExpressionNode *>( targetExpr->get_next() ); … … 359 359 return existing; 360 360 } // build_waitfor_timeout 361 362 ast::WaitUntilStmt::ClauseNode * build_waituntil_clause( const CodeLocation & loc, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt ) { 363 ast::WhenClause * clause = new ast::WhenClause( loc ); 364 clause->when_cond = notZeroExpr( maybeMoveBuild( when ) ); 365 clause->stmt = maybeMoveBuild( stmt ); 366 clause->target = maybeMoveBuild( targetExpr ); 367 return new ast::WaitUntilStmt::ClauseNode( clause ); 368 } 369 ast::WaitUntilStmt::ClauseNode * build_waituntil_else( const CodeLocation & loc, ExpressionNode * when, StatementNode * stmt ) { 370 ast::WhenClause * clause = new ast::WhenClause( loc ); 371 clause->when_cond = notZeroExpr( maybeMoveBuild( when ) ); 372 clause->stmt = maybeMoveBuild( stmt ); 373 return new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::ELSE, clause ); 374 } 375 ast::WaitUntilStmt::ClauseNode * build_waituntil_timeout( const CodeLocation & loc, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt ) { 376 ast::WhenClause * clause = new ast::WhenClause( loc ); 377 clause->when_cond = notZeroExpr( maybeMoveBuild( when ) ); 378 clause->stmt = maybeMoveBuild( stmt ); 379 clause->target = maybeMoveBuild( timeout ); 380 return new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::TIMEOUT, clause ); 381 } 382 383 ast::WaitUntilStmt * build_waituntil_stmt( const CodeLocation & loc, ast::WaitUntilStmt::ClauseNode * root ) { 384 ast::WaitUntilStmt * retStmt = new ast::WaitUntilStmt( loc ); 385 retStmt->predicateTree = root; 386 387 // iterative tree traversal 388 std::vector<ast::WaitUntilStmt::ClauseNode *> nodeStack; // stack needed for iterative traversal 389 ast::WaitUntilStmt::ClauseNode * currNode = nullptr; 390 ast::WaitUntilStmt::ClauseNode * lastInternalNode = nullptr; 391 ast::WaitUntilStmt::ClauseNode * cleanup = nullptr; // used to cleanup removed else/timeout 392 nodeStack.push_back(root); 393 394 do { 395 currNode = nodeStack.back(); 396 nodeStack.pop_back(); // remove node since it will be processed 397 398 switch (currNode->op) { 399 case ast::WaitUntilStmt::ClauseNode::LEAF: 400 retStmt->clauses.push_back(currNode->leaf); 401 break; 402 case ast::WaitUntilStmt::ClauseNode::ELSE: 403 retStmt->else_stmt = currNode->leaf->stmt 404 ? ast::deepCopy( currNode->leaf->stmt ) 405 : nullptr; 406 407 retStmt->else_cond = currNode->leaf->when_cond 408 ? ast::deepCopy( currNode->leaf->when_cond ) 409 : nullptr; 410 411 delete currNode->leaf; 412 break; 413 case ast::WaitUntilStmt::ClauseNode::TIMEOUT: 414 retStmt->timeout_time = currNode->leaf->target 415 ? ast::deepCopy( currNode->leaf->target ) 416 : nullptr; 417 retStmt->timeout_stmt = currNode->leaf->stmt 418 ? ast::deepCopy( currNode->leaf->stmt ) 419 : nullptr; 420 retStmt->timeout_cond = currNode->leaf->when_cond 421 ? 
ast::deepCopy( currNode->leaf->when_cond ) 422 : nullptr; 423 424 delete currNode->leaf; 425 break; 426 default: 427 nodeStack.push_back( currNode->right ); // process right after left 428 nodeStack.push_back( currNode->left ); 429 430 // Cut else/timeout out of the tree 431 if ( currNode->op == ast::WaitUntilStmt::ClauseNode::LEFT_OR ) { 432 if ( lastInternalNode ) 433 lastInternalNode->right = currNode->left; 434 else // if not set then root is LEFT_OR 435 retStmt->predicateTree = currNode->left; 436 437 currNode->left = nullptr; 438 cleanup = currNode; 439 } 440 441 lastInternalNode = currNode; 442 break; 443 } 444 } while ( !nodeStack.empty() ); 445 446 if ( cleanup ) delete cleanup; 447 448 return retStmt; 449 } 361 450 362 451 ast::Stmt * build_with( const CodeLocation & location, ExpressionNode * exprs, StatementNode * stmt ) { -
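build_waituntil_stmt above avoids recursion by walking the clause tree with an explicit stack, pushing the right child before the left so leaves are collected in source order, while LEFT_OR nodes (else/timeout) are spliced out of the predicate tree as they are encountered. The underlying traversal pattern, reduced to a hedged C++ skeleton (for_each_leaf is an illustrative name, not compiler API):

#include <vector>

// Stack-based left-to-right leaf visit, as in build_waituntil_stmt.
template< typename Node, typename Fn >
void for_each_leaf( Node * root, Fn visit ) {
    std::vector< Node * > stack;
    stack.push_back( root );
    while ( !stack.empty() ) {
        Node * n = stack.back();
        stack.pop_back();
        if ( n->left && n->right ) {     // internal AND/OR node
            stack.push_back( n->right ); // right pushed first ...
            stack.push_back( n->left );  // ... so left is popped first
        } else {
            visit( n );                  // leaf: one waituntil clause
        }
    }
}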
src/Parser/StatementNode.h
r4daf79f rc083c3d 100 100 ast::WaitForStmt * build_waitfor_else( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, StatementNode * stmt ); 101 101 ast::WaitForStmt * build_waitfor_timeout( const CodeLocation &, ast::WaitForStmt * existing, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt ); 102 ast::WaitUntilStmt::ClauseNode * build_waituntil_clause( const CodeLocation &, ExpressionNode * when, ExpressionNode * targetExpr, StatementNode * stmt ); 103 ast::WaitUntilStmt::ClauseNode * build_waituntil_else( const CodeLocation &, ExpressionNode * when, StatementNode * stmt ); 104 ast::WaitUntilStmt::ClauseNode * build_waituntil_timeout( const CodeLocation &, ExpressionNode * when, ExpressionNode * timeout, StatementNode * stmt ); 105 ast::WaitUntilStmt * build_waituntil_stmt( const CodeLocation &, ast::WaitUntilStmt::ClauseNode * root ); 102 106 ast::Stmt * build_with( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt ); 103 107 ast::Stmt * build_mutex( const CodeLocation &, ExpressionNode * exprs, StatementNode * stmt ); -
src/Parser/parser.yy
r4daf79f rc083c3d 307 307 ClauseNode * clause; 308 308 ast::WaitForStmt * wfs; 309 ast::WaitUntilStmt::ClauseNode * wucn; 309 310 CondCtl * ifctl; 310 311 ForCtrl * forctl; … … 427 428 %type<expr> when_clause when_clause_opt waitfor waituntil timeout 428 429 %type<stmt> waitfor_statement waituntil_statement 429 %type<wfs> wor_waitfor_clause waituntil_clause wand_waituntil_clause wor_waituntil_clause 430 %type<wfs> wor_waitfor_clause 431 %type<wucn> waituntil_clause wand_waituntil_clause wor_waituntil_clause 430 432 431 433 // declarations … … 1685 1687 waituntil_clause: 1686 1688 when_clause_opt waituntil statement 1687 { printf( "waituntil_clause 1\n" ); $$ = nullptr; }1689 { $$ = build_waituntil_clause( yylloc, $1, $2, maybe_build_compound( yylloc, $3 ) ); } 1688 1690 | '(' wor_waituntil_clause ')' 1689 { printf( "waituntil_clause 2\n" ); $$ = nullptr; }1691 { $$ = $2; } 1690 1692 ; 1691 1693 1692 1694 wand_waituntil_clause: 1693 1695 waituntil_clause %prec THEN 1694 { printf( "wand_waituntil_clause 1\n" ); $$ = nullptr; }1696 { $$ = $1; } 1695 1697 | waituntil_clause wand wand_waituntil_clause 1696 { printf( "wand_waituntil_clause 2\n" ); $$ = nullptr; }1698 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::AND, $1, $3 ); } 1697 1699 ; 1698 1700 1699 1701 wor_waituntil_clause: 1700 1702 wand_waituntil_clause 1701 { printf( "wor_waituntil_clause 1\n" ); $$ = nullptr; }1703 { $$ = $1; } 1702 1704 | wor_waituntil_clause wor wand_waituntil_clause 1703 { printf( "wor_waituntil_clause 2\n" ); $$ = nullptr; }1705 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::OR, $1, $3 ); } 1704 1706 | wor_waituntil_clause wor when_clause_opt ELSE statement 1705 { printf( "wor_waituntil_clause 3\n" ); $$ = nullptr; }1707 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, build_waituntil_else( yylloc, $3, maybe_build_compound( yylloc, $5 ) ) ); } 1706 1708 | wor_waituntil_clause wor when_clause_opt timeout statement %prec THEN 1707 { printf( "wor_waituntil_clause 4\n" ); $$ = nullptr; }1709 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, build_waituntil_timeout( yylloc, $3, $4, maybe_build_compound( yylloc, $5 ) ) ); } 1708 1710 // "else" must be conditional after timeout or timeout is never triggered (i.e., it is meaningless) 1709 1711 | wor_waituntil_clause wor when_clause_opt timeout statement wor ELSE statement // syntax error 1710 1712 { SemanticError( yylloc, "else clause must be conditional after timeout or timeout never triggered." ); $$ = nullptr; } 1711 1713 | wor_waituntil_clause wor when_clause_opt timeout statement wor when_clause ELSE statement 1712 { printf( "wor_waituntil_clause 6\n" ); $$ = nullptr; } 1714 { $$ = new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::LEFT_OR, $1, 1715 new ast::WaitUntilStmt::ClauseNode( ast::WaitUntilStmt::ClauseNode::Op::OR, 1716 build_waituntil_timeout( yylloc, $3, $4, maybe_build_compound( yylloc, $5 ) ), 1717 build_waituntil_else( yylloc, $7, maybe_build_compound( yylloc, $9 ) ) ) ); } 1713 1718 ; 1714 1719 … … 1716 1721 wor_waituntil_clause %prec THEN 1717 1722 // SKULLDUGGERY: create an empty compound statement to test parsing of waituntil statement. 1718 { $$ = new StatementNode( build_compound( yylloc, nullptr ) ); } 1723 { 1724 $$ = new StatementNode( build_waituntil_stmt( yylloc, $1 ) ); 1725 // $$ = new StatementNode( build_compound( yylloc, nullptr ) ); 1726 } 1719 1727 ; 1720 1728 -
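The grammar actions above finally build real ClauseNode trees in place of the earlier printf stubs, with wand binding tighter than wor exactly as && binds tighter than || in expressions. As a worked example, a clause list of the shape A and B or C reduces to the following tree; this fragment is illustrative only, and leafA/leafB/leafC are placeholders for the leaves build_waituntil_clause would return for each parsed clause.

#include "AST/Stmt.hpp" // the compiler's own header for WaitUntilStmt

using CN = ast::WaitUntilStmt::ClauseNode;
CN * leafA = nullptr, * leafB = nullptr, * leafC = nullptr; // placeholders
CN * tree = new CN( CN::Op::OR,
                    new CN( CN::Op::AND, leafA, leafB ),    // "and" reduces first
                    leafC );                                // then joined by "or"

Parenthesized clauses simply pass their subtree through unchanged, which is what the '(' wor_waituntil_clause ')' production expresses.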
src/ResolvExpr/Resolver.cc
r4daf79f rc083c3d 1730 1730 1731 1731 // Find all candidates for a function in canonical form 1732 funcFinder.find( clause.target _func, ResolvMode::withAdjustment() );1732 funcFinder.find( clause.target, ResolvMode::withAdjustment() ); 1733 1733 1734 1734 if ( funcFinder.candidates.empty() ) { 1735 1735 stringstream ss; 1736 1736 ss << "Use of undeclared identifier '"; 1737 ss << clause.target _func.strict_as< ast::NameExpr >()->name;1737 ss << clause.target.strict_as< ast::NameExpr >()->name; 1738 1738 ss << "' in call to waitfor"; 1739 1739 SemanticError( stmt->location, ss.str() ); … … 1922 1922 auto clause2 = new ast::WaitForClause( clause.location ); 1923 1923 1924 clause2->target _func= funcCandidates.front()->expr;1924 clause2->target = funcCandidates.front()->expr; 1925 1925 1926 1926 clause2->target_args.reserve( clause.target_args.size() ); … … 1945 1945 1946 1946 // Resolve the conditions as if in an IfStmt; statements normally 1947 clause2-> cond = findSingleExpression( clause.cond, context );1947 clause2->when_cond = findSingleExpression( clause.when_cond, context ); 1948 1948 clause2->stmt = clause.stmt->accept( *visitor ); 1949 1949
src/main.cc
r4daf79f rc083c3d 48 48 #include "Concurrency/Keywords.h" // for implementMutex, implement... 49 49 #include "Concurrency/Waitfor.h" // for generateWaitfor 50 #include "Concurrency/Waituntil.hpp" // for generateWaitUntil 50 51 #include "ControlStruct/ExceptDecl.h" // for translateExcept 51 52 #include "ControlStruct/ExceptTranslate.h" // for translateThrows, translat... … … 340 341 PASS( "Implement Concurrent Keywords", Concurrency::implementKeywords, transUnit ); 341 342 PASS( "Forall Pointer Decay", Validate::decayForallPointers, transUnit ); 343 PASS( "Implement Waituntil", Concurrency::generateWaitUntil, transUnit ); 342 344 PASS( "Hoist Control Declarations", ControlStruct::hoistControlDecls, transUnit ); 343 345 -
tests/Makefile.am
r4daf79f rc083c3d 11 11 ## Created On : Sun May 31 09:08:15 2015 12 12 ## Last Modified By : Peter A. Buhr 13 ## Last Modified On : Mon May 1 1 6:45:07202314 ## Update Count : 14 413 ## Last Modified On : Mon May 1 17:25:24 2023 14 ## Update Count : 145 15 15 ############################################################################### 16 16 … … 116 116 #---------------------------------------------------------------------------------------------------------------- 117 117 118 all-local : 118 all-local : # This name is important to automake and implies the default build target. 119 119 @+$(TEST_PY) --debug=$(debug) --install=$(installed) --archive-errors=$(archiveerrors) $(concurrent) $(timeouts) $(ARCH) --all # '@' => do not echo command (SILENT), '+' => allows recursive make from within python program 120 120 -
tests/concurrent/futures/select_future.cfa
r4daf79f rc083c3d 196 196 delete( shared_future ); 197 197 printf( "done 3\n" ); 198 199 // C_TODO: add test for select statement once it is implemented200 198 }