Changeset e873838

- Timestamp: Nov 2, 2020, 12:44:43 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 58688bf, 82f791f
- Parents: f7136f7
- Location: libcfa/src/concurrency
- Files: 7 edited
libcfa/src/concurrency/io/setup.cfa
```diff
--- libcfa/src/concurrency/io/setup.cfa (rf7136f7)
+++ libcfa/src/concurrency/io/setup.cfa (re873838)
@@ -149,3 +149,4 @@
 	id.full_proc = false;
 	id.id = doregister(&id);
+	kernelTLS.this_proc_id = &id;
 	__cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
@@ -180,5 +181,5 @@
 			kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
 		#endif
-		__post( io_ctx->sem, &id );
+		post( io_ctx->sem );
 	}
 }
@@ -235,5 +236,5 @@
 	if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {
 
-		ready_schedule_lock( (struct __processor_id_t *)active_processor() );
+		ready_schedule_lock();
 
 		// This is the tricky case
@@ -253,5 +254,5 @@
 	thrd.preempted = __NO_PREEMPTION;
 
-	ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
+	ready_schedule_unlock();
 
 	// Pretend like the thread was blocked all along
```
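Taken together, these hunks are call-site simplifications: the io poller registers its `__processor_id_t` in `kernelTLS.this_proc_id`, which is what lets `ready_schedule_lock()`, `ready_schedule_unlock()`, and `post()` drop their explicit id arguments. A minimal C sketch of that pattern, with illustrative names (`proc_id`, `current_id`, `sched_lock`); this is not the CFA runtime itself:

```c
#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins, not the real CFA types. */
struct proc_id { unsigned id; bool full_proc; };

/* Each kernel thread publishes its id once, at startup. */
static _Thread_local struct proc_id * current_id;

static bool reader_slot[64];   /* one reader-lock slot per registered id */

/* Before this change, callers passed the id explicitly, e.g.
   sched_lock(&id); afterwards the id comes from thread-local storage. */
static void sched_lock(void) {
	assert(current_id);                  /* must have registered first */
	reader_slot[current_id->id] = true;  /* take this thread's slot */
}

static void sched_unlock(void) {
	assert(current_id && reader_slot[current_id->id]);
	reader_slot[current_id->id] = false;
}
```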
libcfa/src/concurrency/kernel.cfa
```diff
--- libcfa/src/concurrency/kernel.cfa (rf7136f7)
+++ libcfa/src/concurrency/kernel.cfa (re873838)
@@ -108,5 +108,5 @@
 static $thread * __next_thread_slow(cluster * this);
 static void __run_thread(processor * this, $thread * dst);
-static void __wake_one( struct __processor_id_t * id, cluster * cltr);
+static void __wake_one(cluster * cltr);
 
 static void push (__cluster_idles & idles, processor & proc);
@@ -282,5 +282,5 @@
 		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
 			// The thread was preempted, reschedule it and reset the flag
-			__schedule_thread( (__processor_id_t*)this, thrd_dst );
+			__schedule_thread( thrd_dst );
 			break RUNNING;
 		}
@@ -358,8 +358,9 @@
 // Scheduler routines
 // KERNEL ONLY
-void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
+void __schedule_thread( $thread * thrd ) {
 	/* paranoid */ verify( thrd );
 	/* paranoid */ verify( thrd->state != Halted );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
 	/* paranoid */ 	if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
@@ -374,8 +375,8 @@
 	if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
 
-	ready_schedule_lock  ( id );
+	ready_schedule_lock();
 	push( thrd->curr_cluster, thrd );
-	__wake_one( id, thrd->curr_cluster );
-	ready_schedule_unlock( id );
+	__wake_one(thrd->curr_cluster);
+	ready_schedule_unlock();
 
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -384,11 +385,13 @@
 // KERNEL ONLY
 static inline $thread * __next_thread(cluster * this) with( *this ) {
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
-	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+	ready_schedule_lock();
 	$thread * thrd = pop( this );
-	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
+	ready_schedule_unlock();
 
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	return thrd;
 }
@@ -396,11 +399,13 @@
 // KERNEL ONLY
 static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 
-	ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
+	ready_schedule_lock();
 	$thread * thrd = pop_slow( this );
-	ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
+	ready_schedule_unlock();
 
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 	return thrd;
 }
@@ -408,4 +413,10 @@
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t * id, $thread * thrd ) {
+void unpark( $thread * thrd ) {
+	if( !thrd ) return;
+
+	/* paranoid */ verify( kernelTLS.this_proc_id );
+	bool full = kernelTLS.this_proc_id->full_proc;
+	if(full) disable_interrupts();
+
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
 	switch(old_ticket) {
@@ -418,5 +429,5 @@
 
 			// Wake lost the race,
-			__schedule_thread( id, thrd );
+			__schedule_thread( thrd );
 			break;
 		default:
@@ -424,11 +435,7 @@
 			abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
 	}
-}
-
-void unpark( $thread * thrd ) {
-	if( !thrd ) return;
-
-	disable_interrupts();
-	__unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
-	enable_interrupts( __cfaabi_dbg_ctx );
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+	if(full) enable_interrupts( __cfaabi_dbg_ctx );
+	/* paranoid */ verify( kernelTLS.this_proc_id );
 }
@@ -505,7 +512,7 @@
 //=============================================================================================
 // Wake a thread from the front if there are any
-static void __wake_one( struct __processor_id_t * id, cluster * this) {
-	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-	/* paranoid */ verify( ready_schedule_islocked( id ) );
+static void __wake_one(cluster * this) {
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ready_schedule_islocked() );
 
 	// Check if there is a sleeping processor
@@ -525,5 +532,5 @@
 	#endif
 
-	/* paranoid */ verify( ready_schedule_islocked( id ) );
+	/* paranoid */ verify( ready_schedule_islocked() );
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
```
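The `unpark()` rewrite folds the old `__unpark()` in and makes the interrupt toggling conditional on `full_proc`. The race it resolves is the ticket protocol visible in the switch above: park and unpark each bump the thread's ticket once, and the old value tells the waker who won. A C sketch of that protocol using C11 atomics; `struct thread` and `reschedule()` are illustrative stand-ins, not the runtime's API:

```c
#include <stdatomic.h>
#include <stdio.h>

#define TICKET_BLOCKED (-1)  /* thread is blocked */
#define TICKET_RUNNING ( 0)  /* thread is running */
#define TICKET_UNBLOCK ( 1)  /* thread should ignore the next block */

/* Hypothetical thread record; reschedule() stands in for __schedule_thread. */
struct thread { atomic_int ticket; };

static void reschedule(struct thread * t) { (void)t; /* push onto ready queue */ }

/* Mirrors the switch in unpark(): the fetch-add is the whole handshake. */
static void unpark_sketch(struct thread * t) {
	int old_ticket = atomic_fetch_add(&t->ticket, 1);
	switch(old_ticket) {
	case TICKET_RUNNING:
		/* unpark ran first; park will see TICKET_UNBLOCK and not block */
		break;
	case TICKET_BLOCKED:
		/* park already blocked, so the waker must reschedule the thread */
		reschedule(t);
		break;
	default:
		fprintf(stderr, "mismatched park/unpark\n");
	}
}
```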
libcfa/src/concurrency/kernel/fwd.hfa
```diff
--- libcfa/src/concurrency/kernel/fwd.hfa (rf7136f7)
+++ libcfa/src/concurrency/kernel/fwd.hfa (re873838)
@@ -35,7 +35,8 @@
 extern "Cforall" {
 	extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
-		struct $thread * volatile this_thread;
-		struct processor * volatile this_processor;
-		struct __stats_t * volatile this_stats;
+		struct $thread          * volatile this_thread;
+		struct processor        * volatile this_processor;
+		struct __processor_id_t * volatile this_proc_id;
+		struct __stats_t        * volatile this_stats;
 
 		struct {
```
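This is the pivot of the whole changeset: `this_proc_id` moves the processor id into the per-kernel-thread TLS block, so every function that used to thread a `__processor_id_t *` parameter can read it instead. A rough C analogue of the structure (all type names here are illustrative forward declarations, not the CFA ones):

```c
#include <stdalign.h>

struct thread; struct processor; struct proc_id; struct stats;

/* Rough C analogue of KernelThreadData: one cache-line-aligned TLS block
   per kernel thread. this_proc_id is the field this changeset adds. */
struct kernel_tls {
	struct thread    * volatile this_thread;
	struct processor * volatile this_processor;
	struct proc_id   * volatile this_proc_id;   /* new in e873838 */
	struct stats     * volatile this_stats;
};

static _Thread_local alignas(128) struct kernel_tls kernel_tls;
```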
libcfa/src/concurrency/kernel/startup.cfa
```diff
--- libcfa/src/concurrency/kernel/startup.cfa (rf7136f7)
+++ libcfa/src/concurrency/kernel/startup.cfa (re873838)
@@ -122,3 +122,4 @@
 	NULL,
 	NULL,
+	NULL,
 	{ 1, false, false },
@@ -212,3 +213,4 @@
 	//initialize the global state variables
 	kernelTLS.this_processor = mainProcessor;
+	kernelTLS.this_proc_id = (__processor_id_t*)mainProcessor;
 	kernelTLS.this_thread = mainThread;
@@ -227,5 +229,5 @@
 	// Add the main thread to the ready queue
 	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	__schedule_thread( (__processor_id_t *)mainProcessor, mainThread);
+	__schedule_thread(mainThread);
 
 	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
@@ -324,4 +326,5 @@
 	processor * proc = (processor *) arg;
 	kernelTLS.this_processor = proc;
+	kernelTLS.this_proc_id = (__processor_id_t*)proc;
 	kernelTLS.this_thread = 0p;
 	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
```
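The ordering here matters: the TLS id must be published before the first `__schedule_thread()` call, because the now-parameterless lock helpers read it. A minimal C sketch of that constraint, assuming hypothetical names (`startup_sketch`, `schedule_thread`) and an embedded first member instead of the diff's cast:

```c
/* Sketch of the startup ordering this file enforces; all names are
   illustrative, not the CFA runtime's. */
struct proc_id   { unsigned id; };
struct processor { struct proc_id base; /* id first, so a cast also works */ };
struct thread;

static _Thread_local struct processor * this_processor;
static _Thread_local struct proc_id   * this_proc_id;

static void schedule_thread(struct thread * t) { (void)t; /* ready queue */ }

static void startup_sketch(struct processor * main_proc, struct thread * main_thread) {
	this_processor = main_proc;
	this_proc_id   = &main_proc->base;   /* the CFA code casts instead */
	schedule_thread(main_thread);        /* safe: TLS initialized first */
}
```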
libcfa/src/concurrency/kernel_private.hfa
```diff
--- libcfa/src/concurrency/kernel_private.hfa (rf7136f7)
+++ libcfa/src/concurrency/kernel_private.hfa (re873838)
@@ -33,7 +33,7 @@
 }
 
-void __schedule_thread( struct __processor_id_t *, $thread * )
+void __schedule_thread( $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (2)))
+	__attribute__((nonnull (1)))
 #endif
 ;
@@ -63,28 +63,7 @@
 )
 
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd );
-
 #define TICKET_BLOCKED (-1) // thread is blocked
 #define TICKET_RUNNING ( 0) // thread is running
 #define TICKET_UNBLOCK ( 1) // thread should ignore next block
-
-static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-	for() {
-		struct $thread * expected = this.ptr;
-		if(expected == 1p) return false;
-		if(expected == 0p) {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				return false;
-			}
-		}
-		else {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				__unpark( id, expected );
-				return true;
-			}
-		}
-	}
-}
 
 //-----------------------------------------------------------------------------
@@ -201,7 +180,9 @@
 // Reader side : acquire when using the ready queue to schedule but not
 //  creating/destroying queues
-static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+
+	unsigned iproc = kernelTLS.this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 
@@ -225,7 +206,9 @@
 }
 
-static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+
+	unsigned iproc = kernelTLS.this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 	/*paranoid*/ verify(data[iproc].lock);
@@ -239,5 +222,7 @@
 
 #ifdef __CFA_WITH_VERIFY__
-static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+static inline bool ready_schedule_islocked(void) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+	__processor_id_t * proc = kernelTLS.this_proc_id;
 	return __scheduler_lock->data[proc->id].owned;
 }
```
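The deleted `__post()` is the interesting part of this file: a lock-free binary semaphore whose single word is either `0p` (closed), `1p` (open), or a pointer to a parked waiter; with `__unpark()` gone, call sites use the public `post()`/`unpark()` instead. A C approximation of the removed protocol, with illustrative names (`sem_post`, `wake`):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* The semaphore's one word is 0 (closed), 1 (open), or a parked waiter.
   wake() stands in for the unpark step; not the CFA runtime itself. */
struct thread;
static void wake(struct thread * t) { (void)t; /* reschedule the waiter */ }

struct single_sem { _Atomic uintptr_t ptr; };

static bool sem_post(struct single_sem * s) {
	for (;;) {
		uintptr_t expected = atomic_load(&s->ptr);
		if (expected == 1) return false;            /* already open */
		if (expected == 0) {
			/* closed -> open, nobody to wake */
			if (atomic_compare_exchange_strong(&s->ptr, &expected, 1))
				return false;
		} else {
			/* a waiter is parked: claim it, then wake it */
			if (atomic_compare_exchange_strong(&s->ptr, &expected, 0)) {
				wake((struct thread *)expected);
				return true;
			}
		}
	}
}
```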
libcfa/src/concurrency/preemption.cfa
```diff
--- libcfa/src/concurrency/preemption.cfa (rf7136f7)
+++ libcfa/src/concurrency/preemption.cfa (re873838)
@@ -38,5 +38,5 @@
 // FwdDeclarations : timeout handlers
 static void preempt( processor * this );
-static void timeout( struct __processor_id_t * id, $thread * this );
+static void timeout( $thread * this );
 
 // FwdDeclarations : Signal handlers
@@ -91,5 +91,5 @@
 
 // Tick one frame of the Discrete Event Simulation for alarms
-static void tick_preemption( struct __processor_id_t * id ) {
+static void tick_preemption(void) {
 	alarm_node_t * node = 0p;                      // Used in the while loop but cannot be declared in the while condition
 	alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading
@@ -109,5 +109,5 @@
 		}
 		else {
-			timeout( id, node->thrd );
+			timeout( node->thrd );
 		}
 
@@ -270,9 +270,9 @@
 
 // reserved for future use
-static void timeout( struct __processor_id_t * id, $thread * this ) {
+static void timeout( $thread * this ) {
 	#if !defined( __CFA_NO_STATISTICS__ )
 		kernelTLS.this_stats = this->curr_cluster->stats;
 	#endif
-	__unpark( id, this );
+	unpark( this );
 }
 
@@ -413,4 +413,5 @@
 	id.full_proc = false;
 	id.id = doregister(&id);
+	kernelTLS.this_proc_id = &id;
 
 	// Block sigalrms to control when they arrive
@@ -458,5 +459,5 @@
 				// __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
 				lock( event_kernel->lock __cfaabi_dbg_ctx2 );
-				tick_preemption( &id );
+				tick_preemption();
 				unlock( event_kernel->lock );
 				break;
```
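`timeout()` can switch from `__unpark()` to the public `unpark()` because `unpark()` now decides for itself whether interrupts need toggling: it checks the caller's `full_proc` flag, and the alarm thread registers with `full_proc = false`, so it skips that step entirely. A hedged C sketch of that control flow, with illustrative names (`current_id`, `wake_thread`):

```c
#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins, not the CFA runtime. */
struct proc_id { unsigned id; bool full_proc; };
static _Thread_local struct proc_id * current_id;

static void disable_interrupts(void) { /* mask SIGALRM, bump a count  */ }
static void enable_interrupts(void)  { /* unmask once the count drops */ }
static void wake_thread(void * thrd) { (void)thrd; /* ticket protocol */ }

static void unpark_sketch(void * thrd) {
	if (!thrd) return;
	assert(current_id);                 /* every caller registered an id */
	bool full = current_id->full_proc;
	if (full) disable_interrupts();     /* only real processors need this */
	wake_thread(thrd);
	if (full) enable_interrupts();
}
```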
libcfa/src/concurrency/thread.cfa
```diff
--- libcfa/src/concurrency/thread.cfa (rf7136f7)
+++ libcfa/src/concurrency/thread.cfa (re873838)
@@ -127,5 +127,5 @@
 	verify( this_thrd->context.SP );
 
-	__schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd);
+	__schedule_thread( this_thrd );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
```