Changes in src/libcfa/concurrency/kernel.c [afd550c:de6319f]
File: 1 edited
Legend:
- Unmodified: leading space
- Added: leading "+"
- Removed: leading "-"
- "@@ -old +new @@": starting line of each hunk in the old (afd550c) and new (de6319f) revision; unchanged code between hunks is elided
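The changeset's central pattern: the kernel's thread-local block is renamed from kernelTLS to kernelThreadData, and every access to it is funnelled through TL_GET / TL_SET macros instead of direct member access. The macro and struct definitions are not part of this diff; the following is a minimal sketch of what the pattern implies, with the member names taken from the accesses in the hunks below and everything else (layout, volatile, the exact expansions) assumed for illustration:

    /* sketch only -- the real declarations live elsewhere in the runtime */
    struct KernelThreadData {
    	struct thread_desc    * volatile this_thread;     /* volatile is a guess, hinted at by */
    	struct coroutine_desc * volatile this_coroutine;  /* the (thread_desc *) cast added in P() below */
    	struct processor      * volatile this_processor;
    	struct {
    		bool enabled;                  /* is preemption currently allowed? */
    		unsigned short disable_count;  /* nesting depth of disable_interrupts() */
    	} preemption_state;
    };

    extern thread_local struct KernelThreadData kernelThreadData;

    /* one plausible expansion: a single indirection point, so the access
       strategy can change later without touching every call site */
    #define TL_GET( member )         ( kernelThreadData.member )
    #define TL_SET( member, value )  ( kernelThreadData.member = ( value ) )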
src/libcfa/concurrency/kernel.c
afd550c → de6319f

@@ -56 +56 @@
 // volatile thread_local unsigned short disable_preempt_count = 1;

-thread_local struct KernelThreadData kernelTLS = {
+thread_local struct KernelThreadData kernelThreadData = {
 	NULL,
 	NULL,
@@ -155 +155 @@
 	terminate(&this);
 	verify(this.do_terminate);
-	verify( kernelTLS.this_processor != &this);
+	verify( TL_GET( this_processor ) != &this);
 	P( terminated );
-	verify( kernelTLS.this_processor != &this);
+	verify( TL_GET( this_processor ) != &this);
 	pthread_join( kernel_thread, NULL );
 }
@@ -196 +196 @@
 	if(readyThread)
 	{
-		verify( ! kernelTLS.preemption_state.enabled );
+		verify( ! TL_GET( preemption_state ).enabled );

 		runThread(this, readyThread);

-		verify( ! kernelTLS.preemption_state.enabled );
+		verify( ! TL_GET( preemption_state ).enabled );

 		//Some actions need to be taken from the kernel
@@ -221 +221 @@
 }

-// KERNEL ONLY
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
@@ -229 +228 @@
 	coroutine_desc * thrd_cor = dst->curr_cor;

-	//
+	//Reset the terminating actions here
 	this->finish.action_code = No_Action;

-	//
-	kernelTLS.this_thread = dst;
+	//Update global state
+	TL_SET( this_thread, dst );

 	// Context Switch to the thread
@@ -240 +239 @@
 }

-// KERNEL_ONLY
 void returnToKernel() {
-	coroutine_desc * proc_cor = get_coroutine( kernelTLS.this_processor->runner );
-	coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
+	coroutine_desc * proc_cor = get_coroutine( TL_GET( this_processor )->runner );
+	coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
 	ThreadCtxSwitch(thrd_cor, proc_cor);
 }

-// KERNEL_ONLY
 // Once a thread has finished running, some of
 // its final actions must be executed from the kernel
 void finishRunning(processor * this) with( this->finish ) {
 	if( action_code == Release ) {
-		verify( ! kernelTLS.preemption_state.enabled );
+		verify( ! TL_GET( preemption_state ).enabled );
 		unlock( *lock );
 	}
@@ -259 +256 @@
 	}
 	else if( action_code == Release_Schedule ) {
-		verify( ! kernelTLS.preemption_state.enabled );
+		verify( ! TL_GET( preemption_state ).enabled );
 		unlock( *lock );
 		ScheduleThread( thrd );
 	}
 	else if( action_code == Release_Multi ) {
-		verify( ! kernelTLS.preemption_state.enabled );
+		verify( ! TL_GET( preemption_state ).enabled );
 		for(int i = 0; i < lock_count; i++) {
 			unlock( *locks[i] );
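A note for readers skimming the finishRunning hunks above: a blocking thread fills in a small "finish" record before switching to the processor coroutine, and the kernel executes the recorded action after the switch. The declarations are not in this changeset; the field and enumerator names below are exactly the ones the hunks use, while the type names FinishOpCode and FinishAction are invented for this sketch:

    struct __spinlock_t;   /* defined elsewhere in the runtime */
    struct thread_desc;

    enum FinishOpCode {
    	No_Action,              /* nothing to do after the switch */
    	Release,                /* unlock one spinlock for the blocked thread */
    	Schedule,               /* make one thread ready */
    	Release_Schedule,       /* unlock, then make one thread ready */
    	Release_Multi,          /* unlock several spinlocks */
    	Release_Multi_Schedule, /* unlock several, then ready several threads */
    };

    struct FinishAction {
    	enum FinishOpCode action_code;
    	struct __spinlock_t *  lock;    /* Release, Release_Schedule */
    	struct thread_desc  *  thrd;    /* Schedule, Release_Schedule */
    	struct __spinlock_t ** locks;   /* the Multi variants */
    	unsigned short lock_count;
    	struct thread_desc  ** thrds;
    	unsigned short thrd_count;
    };

Deferring the unlock until after the context switch is presumably what makes "release and block" atomic: no other processor can acquire the lock and try to wake the thread while it is still running on its old stack.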
@@ -288 +285 @@
 }

-// KERNEL_ONLY
 // Context invoker for processors
 // This is the entry point for processors (kernel threads)
@@ -294 +290 @@
 void * CtxInvokeProcessor(void * arg) {
 	processor * proc = (processor *) arg;
-	kernelTLS.this_processor = proc;
-	kernelTLS.this_coroutine = NULL;
-	kernelTLS.this_thread = NULL;
-	kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
+	TL_SET( this_processor, proc );
+	TL_SET( this_coroutine, NULL );
+	TL_SET( this_thread, NULL );
+	TL_GET( preemption_state ).[enabled, disable_count] = [false, 1];
 	// SKULLDUGGERY: We want to create a context for the processor coroutine
 	// which is needed for the 2-step context switch. However, there is no reason
@@ -309 +305 @@

 	//Set global state
-	kernelTLS.this_coroutine = get_coroutine(proc->runner);
-	kernelTLS.this_thread = NULL;
+	TL_SET( this_coroutine, get_coroutine(proc->runner) );
+	TL_SET( this_thread, NULL );

 	//We now have a proper context from which to schedule threads
@@ -337 +333 @@
 }

-// KERNEL_ONLY
 void kernel_first_resume(processor * this) {
-	coroutine_desc * src = kernelTLS.this_coroutine;
+	coroutine_desc * src = TL_GET( this_coroutine );
 	coroutine_desc * dst = get_coroutine(this->runner);

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	create_stack(&dst->stack, dst->stack.size);
 	CtxStart(&this->runner, CtxInvokeCoroutine);

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	dst->last = src;
@@ -356 +351 @@

 	// set new coroutine that task is executing
-	kernelTLS.this_coroutine = dst;
+	TL_SET( this_coroutine, dst );

 	// SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
@@ -373 +368 @@
 	src->state = Active;

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 }

 //-----------------------------------------------------------------------------
 // Scheduler routines
-
-// KERNEL ONLY
 void ScheduleThread( thread_desc * thrd ) {
+	// if( ! thrd ) return;
 	verify( thrd );
 	verify( thrd->self_cor.state != Halted );

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
@@ -394 +388 @@
 	}

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 }

-// KERNEL ONLY
 thread_desc * nextThread(cluster * this) with( *this ) {
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	lock( ready_queue_lock __cfaabi_dbg_ctx2 );
 	thread_desc * head = pop_head( ready_queue );
 	unlock( ready_queue_lock );
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	return head;
 }
@@ -409 +402 @@
 void BlockInternal() {
 	disable_interrupts();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
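The five BlockInternal variants that follow all share one skeleton; only the finish action recorded for finishRunning differs. Schematically (a paraphrase of the hunks, not new behaviour):

    void BlockInternal( /* variant-specific arguments */ ) {
    	disable_interrupts();                            // no preemption while parking
    	// record this variant's finish action in this_processor->finish,
    	// e.g. finish.action_code = Release; finish.lock = lock;
    	verify( ! TL_GET( preemption_state ).enabled );  // invariant on entering the kernel
    	returnToKernel();                                // switch to the processor coroutine,
    	                                                 // which runs finishRunning() on our behalf
    	verify( ! TL_GET( preemption_state ).enabled );  // still disabled when we resume
    	enable_interrupts( __cfaabi_dbg_ctx );           // re-arm preemption on the way out
    }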
@@ -417 +410 @@
 void BlockInternal( __spinlock_t * lock ) {
 	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
+	with( *TL_GET( this_processor ) ) {
 		finish.action_code = Release;
 		finish.lock = lock;
 	}

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
@@ -431 +424 @@
 void BlockInternal( thread_desc * thrd ) {
 	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
+	with( *TL_GET( this_processor ) ) {
 		finish.action_code = Schedule;
 		finish.thrd = thrd;
 	}

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
@@ -446 +439 @@
 	assert(thrd);
 	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
+	with( *TL_GET( this_processor ) ) {
 		finish.action_code = Release_Schedule;
 		finish.lock = lock;
@@ -452 +445 @@
 	}

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
@@ -461 +454 @@
 void BlockInternal(__spinlock_t * locks [], unsigned short count) {
 	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
+	with( *TL_GET( this_processor ) ) {
 		finish.action_code = Release_Multi;
 		finish.locks = locks;
@@ -467 +460 @@
 	}

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
@@ -476 +469 @@
 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
 	disable_interrupts();
-	with( * kernelTLS.this_processor ) {
+	with( *TL_GET( this_processor ) ) {
 		finish.action_code = Release_Multi_Schedule;
 		finish.locks = locks;
@@ -484 +477 @@
 	}

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	enable_interrupts( __cfaabi_dbg_ctx );
 }

-// KERNEL ONLY
 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	with( * kernelTLS.this_processor ) {
+	verify( ! TL_GET( preemption_state ).enabled );
+	with( *TL_GET( this_processor ) ) {
 		finish.action_code = thrd ? Release_Schedule : Release;
 		finish.lock = lock;
@@ -509 +501 @@
 // Kernel boot procedures
 void kernel_startup(void) {
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");

@@ -555 +547 @@

 	//initialize the global state variables
-	kernelTLS.this_processor = mainProcessor;
-	kernelTLS.this_thread = mainThread;
-	kernelTLS.this_coroutine = &mainThread->self_cor;
+	TL_SET( this_processor, mainProcessor );
+	TL_SET( this_thread, mainThread );
+	TL_SET( this_coroutine, &mainThread->self_cor );

 	// Enable preemption
@@ -569 +561 @@
 	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
 	// mainThread is on the ready queue when this call is made.
-	kernel_first_resume( kernelTLS.this_processor );
+	kernel_first_resume( TL_GET( this_processor ) );


@@ -576 +568 @@
 	__cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );
 	enable_interrupts( __cfaabi_dbg_ctx );
-	verify( TL_GET( preemption_state.enabled ) );
+	verify( TL_GET( preemption_state ).enabled );
 }

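The last change in kernel_startup (and the matching one in kernel_shutdown below) is more than mechanical renaming: the old code passed a member chain into the macro, the new code applies the field access outside it.

    // before:  verify( TL_GET( preemption_state.enabled ) );  // member chain inside the macro argument
    // after:   verify( TL_GET( preemption_state ).enabled );  // macro yields the member, field access outside

Under the naive object-like expansion sketched near the top of this page both forms happen to produce the same access, but keeping the argument a single member name is what allows TL_GET to grow a more elaborate expansion (qualifiers, address-taking, a helper call) without breaking call sites.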
@@ -584 +576 @@
 	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

-	verify( TL_GET( preemption_state.enabled ) );
+	verify( TL_GET( preemption_state ).enabled );
 	disable_interrupts();
-	verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! TL_GET( preemption_state ).enabled );

 	// SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
@@ -612 +604 @@

 //=============================================================================================
-// Kernel Quiescing
-//=============================================================================================
-
-// void halt(processor * this) with( this ) {
-// 	pthread_mutex_lock( &idle.lock );
-
-
-
-// 	// SKULLDUGGERY: Even if spurious wake-up is a thing
-// 	// spuriously waking up a kernel thread is not a big deal
-// 	// if it is very rare.
-// 	pthread_cond_wait( &idle.cond, &idle.lock);
-// 	pthread_mutex_unlock( &idle.lock );
-// }
-
-// void wake(processor * this) with( this ) {
-// 	pthread_mutex_lock (&idle.lock);
-// 	pthread_cond_signal (&idle.cond);
-// 	pthread_mutex_unlock(&idle.lock);
-// }
-
-//=============================================================================================
 // Unexpected Terminating logic
 //=============================================================================================
@@ -642 +612 @@
 static bool kernel_abort_called = false;

-void * kernel_abort (void) __attribute__ ((__nothrow__)) {
+void * kernel_abort(void) __attribute__ ((__nothrow__)) {
 	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
 	// the globalAbort flag is true.
@@ -663 +633 @@
 	}

-	return kernelTLS.this_thread;
+	return TL_GET( this_thread );
 }

@@ -672 +642 @@
 	__cfaabi_dbg_bits_write( abort_text, len );

-	if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
-		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
+	if ( get_coroutine(thrd) != TL_GET( this_coroutine ) ) {
+		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
 		__cfaabi_dbg_bits_write( abort_text, len );
 	}
@@ -682 +652 @@

 int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
-	return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
+	return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
 }

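The changeset elides the body of kernel_abort, but the comment and the kernel_abort_called flag describe a classic enter-once guard: the first caller performs the abort work, later callers (for example another kernel thread hitting the same fault) observe the flag and stand down. As a stand-alone illustration of that idea only (the runtime's actual mechanism uses a lock and returning signal handlers, per the comment above):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool abort_called;

    /* returns true only for the first caller; everyone after sees the flag set */
    static bool enter_abort_once(void) {
    	bool expected = false;
    	return atomic_compare_exchange_strong(&abort_called, &expected, true);
    }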
@@ -712 +682 @@
 	if ( count < 0 ) {
 		// queue current task
-		append( waiting, kernelTLS.this_thread );
+		append( waiting, (thread_desc *)TL_GET( this_thread ) );

 		// atomically release spin lock and block
@@ -772 +742 @@
 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
 	this.prev_name = prev_name;
-	this.prev_thrd = kernelTLS.this_thread;
+	this.prev_thrd = TL_GET( this_thread );
 }
 )
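The append hunk above is the only line of P() the changeset shows. For context, a plausible reconstruction of the surrounding routine under the usual counting-semaphore scheme (lock, count, and waiting are assumed to be fields of the semaphore; only the append line is confirmed by the diff):

    void P( semaphore & this ) with( this ) {
    	lock( lock __cfaabi_dbg_ctx2 );
    	count -= 1;
    	if ( count < 0 ) {
    		// queue current task
    		append( waiting, (thread_desc *)TL_GET( this_thread ) );

    		// atomically release spin lock and block
    		BlockInternal( &lock );          // the kernel unlocks after switching away
    	}
    	else {
    		unlock( lock );
    	}
    }

The new (thread_desc *) cast suggests that TL_GET( this_thread ) now yields a qualified (likely volatile) pointer that append does not accept directly; the cast strips the qualifier. That reading is an inference from the diff, not something the changeset states.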