Changes in / [b4130f9:7c9ac4a]
- Files:
-
- 1 added
- 7 edited
-
benchmark/readyQ/cycle.cc (added)
-
benchmark/readyQ/cycle.cfa (modified) (1 diff)
-
benchmark/readyQ/cycle.cpp (modified) (3 diffs)
-
benchmark/readyQ/cycle.go (modified) (1 diff)
-
libcfa/src/concurrency/invoke.h (modified) (1 diff)
-
libcfa/src/concurrency/kernel.cfa (modified) (5 diffs)
-
libcfa/src/concurrency/monitor.cfa (modified) (8 diffs)
-
libcfa/src/concurrency/thread.cfa (modified) (1 diff)
Legend:
- Unmodified
- Added
- Removed
-
benchmark/readyQ/cycle.cfa
rb4130f9 r7c9ac4a 77 77 78 78 for(i; tthreads) { 79 post( thddata[i].self ); 79 80 Partner & partner = join( *threads[i] ).partner; 80 81 global_counter += partner.count; -
benchmark/readyQ/cycle.cpp
rb4130f9 r7c9ac4a 1 1 2 2 #include "rq_bench.hpp" 3 #include <libfibre/fibre.h> 3 4 5 class __attribute__((aligned(128))) bench_sem { 6 Fibre * volatile ptr = nullptr; 7 public: 8 inline bool wait() { 9 static Fibre * const ready = reinterpret_cast<Fibre * const>(1ull); 10 for(;;) { 11 Fibre * expected = this->ptr; 12 if(expected == ready) { 13 if(__atomic_compare_exchange_n(&this->ptr, &expected, nullptr, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 14 return false; 15 } 16 } 17 else { 18 /* paranoid */ assert( expected == nullptr ); 19 if(__atomic_compare_exchange_n(&this->ptr, &expected, fibre_self(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 20 fibre_park(); 21 return true; 22 } 23 } 24 25 } 26 } 27 28 inline bool post() { 29 static Fibre * const ready = reinterpret_cast<Fibre * const>(1ull); 30 for(;;) { 31 Fibre * expected = this->ptr; 32 if(expected == ready) return false; 33 if(expected == nullptr) { 34 if(__atomic_compare_exchange_n(&this->ptr, &expected, ready, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 35 return false; 36 } 37 } 38 else { 39 if(__atomic_compare_exchange_n(&this->ptr, &expected, nullptr, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 40 fibre_unpark( expected ); 41 return true; 42 } 43 } 44 } 45 } 46 }; 4 47 struct Partner { 5 48 unsigned long long count = 0; … … 55 98 thddata[i].self.post(); 56 99 } 57 wait (start, is_tty);100 wait<Fibre>(start, is_tty); 58 101 59 102 stop = true; … … 62 105 63 106 for(int i = 0; i < tthreads; i++) { 107 thddata[i].self.post(); 64 108 fibre_join( threads[i], nullptr ); 65 109 global_counter += thddata[i].count; -
benchmark/readyQ/cycle.go
rb4130f9 r7c9ac4a 63 63 global_counter := uint64(0) 64 64 for i := 0; i < tthreads; i++ { 65 select { 66 case channels[i] <- 0: 67 default: 68 } 65 69 global_counter += <- result 66 70 } -
libcfa/src/concurrency/invoke.h
rb4130f9 r7c9ac4a 68 68 }; 69 69 70 enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled };70 enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting }; 71 71 72 72 struct $coroutine { -
libcfa/src/concurrency/kernel.cfa
rb4130f9 r7c9ac4a 251 251 /* paranoid */ verify( ! __preemption_enabled() ); 252 252 /* paranoid */ verify( kernelTLS().this_thread == thrd_dst ); 253 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr ); 253 254 /* paranoid */ verify( thrd_dst->context.SP ); 254 255 /* paranoid */ verify( thrd_dst->state != Halted ); … 267 268 /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ), "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); 268 269 /* paranoid */ verify( thrd_dst->context.SP ); 270 /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr ); 269 271 /* paranoid */ verify( kernelTLS().this_thread == thrd_dst ); 270 272 /* paranoid */ verify( ! __preemption_enabled() ); … 286 288 } 287 289 288 if(unlikely(thrd_dst->state == Halt ed)) {290 if(unlikely(thrd_dst->state == Halting)) { 289 291 // The thread has halted, it should never be scheduled/run again 290 292 // finish the thread … 360 362 void __schedule_thread( $thread * thrd ) { 361 363 /* paranoid */ verify( ! __preemption_enabled() ); 364 /* paranoid */ verify( kernelTLS().this_proc_id ); 362 365 /* paranoid */ verify( thrd ); 363 366 /* paranoid */ verify( thrd->state != Halted ); 364 /* paranoid */ verify( kernelTLS().this_proc_id);367 /* paranoid */ verify( thrd->curr_cluster ); 365 368 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) 366 369 /* paranoid */ if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, … 473 476 disable_interrupts(); 474 477 475 thrd->state = Halted; 478 /* paranoid */ verify( !
__preemption_enabled() ); 479 /* paranoid */ verify( thrd->state == Active ); 480 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary ); 481 /* paranoid */ verify( kernelTLS().this_thread == thrd ); 482 /* paranoid */ verify( thrd->context.SP ); 483 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd ); 484 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd ); 485 486 thrd->state = Halting; 476 487 if( TICKET_RUNNING != thrd->ticket ) { abort( "Thread terminated with pending unpark" ); } 477 if( thrd != this->owner || this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); } 488 if( thrd != this->owner ) { abort( "Thread internal monitor has incorrect owner" ); } 489 if( this->recursion != 1) { abort( "Thread internal monitor has unbalanced recursion" ); } 478 490 479 491 // Leave the thread 480 /* paranoid */ verify( ! __preemption_enabled() );481 492 returnToKernel(); 493 494 // Control flow should never reach here! 482 495 abort(); 483 484 // Control flow should never reach here!485 496 } 486 497 } -
libcfa/src/concurrency/monitor.cfa
rb4130f9 r7c9ac4a 142 142 static void __dtor_enter( $monitor * this, fptr_t func, bool join ) { 143 143 $thread * thrd = active_thread(); 144 #if defined( __CFA_WITH_VERIFY__ ) 145 bool is_thrd = this == &thrd->self_mon; 146 #endif 144 147 145 148 // Lock the monitor spinlock … 155 158 __set_owner( this, thrd ); 156 159 157 verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 160 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 161 /* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled ); 158 162 159 163 unlock( this->lock ); … 166 170 } 167 171 // SKULLDUGGERY: join will act as a dtor so it would normally trigger to above check 172 // because join will not release the monitor after it executed. 168 173 // to avoid that it sets the owner to the special value thrd | 1p before exiting 169 174 else if( this->owner == ($thread*)(1 | (uintptr_t)thrd) ) { 172 177 173 178 // No one has the monitor, just take it 174 this->owner = thrd; 175 176 verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 179 __set_owner( this, thrd ); 180 181 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 182 /* paranoid */ verify( !is_thrd || thrd->state == Halted || thrd->state == Cancelled ); 177 183 178 184 unlock( this->lock ); 179 185 return; 180 186 } 187 188 // The monitor is busy, if this is a thread and the thread owns itself, it better be active 189 /* paranoid */ verify( !is_thrd || this->owner != thrd || (thrd->state != Halted && thrd->state != Cancelled) ); 181 190 182 191 __lock_size_t count = 1; … 208 217 // Some one was waiting for us,
enter 209 218 /* paranoid */ verifyf( active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this ); 219 220 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 221 return; 210 222 } 211 223 else { … … 227 239 return; 228 240 } 229 230 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this);231 232 241 } 233 242 … … 285 294 286 295 // Lock the monitor now 296 /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd->canary ); 287 297 /* paranoid */ verify( this->lock.lock ); 298 /* paranoid */ verify( thrd->context.SP ); 299 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd ); 300 /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd ); 301 /* paranoid */ verify( ! __preemption_enabled() ); 302 288 303 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this ); 289 /* paranoid */ verify( ! __preemption_enabled() ); 290 /* paranoid */ verify( thrd->state == Halted ); 304 /* paranoid */ verify( thrd->state == Halting ); 291 305 /* paranoid */ verify( this->recursion == 1 ); 292 306 … … 297 311 // Fetch the next thread, can be null 298 312 $thread * new_owner = next_thread( this ); 313 314 // Mark the state as fully halted 315 thrd->state = Halted; 299 316 300 317 // Release the monitor lock -
libcfa/src/concurrency/thread.cfa
rb4130f9 r7c9ac4a 82 82 T & thrd, void(*defaultResumptionHandler)(ThreadCancelled(T) &)) { 83 83 $monitor * m = get_monitor(thrd); 84 $thread * desc = get_thread(thrd); 85 86 // Setup the monitor guard 84 87 void (*dtor)(T& mutex this) = ^?{}; 85 88 bool join = defaultResumptionHandler != (void(*)(ThreadCancelled(T)&))0; 86 89 (this.mg){&m, (void(*)())dtor, join}; 87 90 91 92 /* paranoid */ verifyf( Halted == desc->state || Cancelled == desc->state, "Expected thread to be Halted or Cancelled, was %d\n", (int)desc->state ); 93 88 94 // After the guard set-up and any wait, check for cancellation. 89 $thread * desc = get_thread(thrd);90 95 struct _Unwind_Exception * cancellation = desc->self_cor.cancellation; 91 96 if ( likely( 0p == cancellation ) ) {
Note: See TracChangeset for help on using the changeset viewer.