Changeset 32cab5b for src/libcfa/concurrency/kernel.c
- Timestamp: Apr 17, 2018, 12:01:09 PM (7 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 3265399
- Parents: b2fe1c9 (diff), 81bb114 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Files: 1 edited
src/libcfa/concurrency/kernel.c
--- rb2fe1c9
+++ r32cab5b

 // Created On       : Tue Jan 17 12:27:26 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Feb  8 23:52:19 2018
-// Update Count     : 5
+// Last Modified On : Mon Apr  9 16:11:46 2018
+// Update Count     : 24
 //
…
 //CFA Includes
+#include "time"
 #include "kernel_private.h"
 #include "preemption.h"
…
 // Global state

-thread_local coroutine_desc * volatile this_coroutine;
-thread_local thread_desc * volatile this_thread;
-thread_local processor * volatile this_processor;
-
 // volatile thread_local bool preemption_in_progress = 0;
 // volatile thread_local bool preemption_enabled = false;
 // volatile thread_local unsigned short disable_preempt_count = 1;

-volatile thread_local __cfa_kernel_preemption_state_t preemption_state = { false, false, 1 };
+thread_local struct KernelThreadData kernelThreadData = {
+    NULL,
+    NULL,
+    NULL,
+    { 1, false, false }
+};

 //-----------------------------------------------------------------------------
…
     terminate(&this);
     verify(this.do_terminate);
-    verify(this_processor != &this);
+    verify(TL_GET( this_processor ) != &this);
     P( terminated );
-    verify(this_processor != &this);
+    verify(TL_GET( this_processor ) != &this);
     pthread_join( kernel_thread, NULL );
 }
…
     if(readyThread)
     {
-        verify( ! preemption_state.enabled );
+        verify( ! TL_GET( preemption_state ).enabled );

         runThread(this, readyThread);

-        verify( ! preemption_state.enabled );
+        verify( ! TL_GET( preemption_state ).enabled );

         //Some actions need to be taken from the kernel
…
     //Update global state
-    this_thread = dst;
+    TL_SET( this_thread, dst );

     // Context Switch to the thread
…
 void returnToKernel() {
-    coroutine_desc * proc_cor = get_coroutine(this_processor->runner);
-    coroutine_desc * thrd_cor = this_thread->curr_cor = this_coroutine;
+    coroutine_desc * proc_cor = get_coroutine(TL_GET( this_processor )->runner);
+    coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
     ThreadCtxSwitch(thrd_cor, proc_cor);
 }
…
 void finishRunning(processor * this) with( this->finish ) {
     if( action_code == Release ) {
-        verify( ! preemption_state.enabled );
+        verify( ! TL_GET( preemption_state ).enabled );
         unlock( *lock );
     }
…
     }
     else if( action_code == Release_Schedule ) {
-        verify( ! preemption_state.enabled );
+        verify( ! TL_GET( preemption_state ).enabled );
         unlock( *lock );
         ScheduleThread( thrd );
     }
     else if( action_code == Release_Multi ) {
-        verify( ! preemption_state.enabled );
+        verify( ! TL_GET( preemption_state ).enabled );
         for(int i = 0; i < lock_count; i++) {
             unlock( *locks[i] );
…
 void * CtxInvokeProcessor(void * arg) {
     processor * proc = (processor *) arg;
-    this_processor = proc;
-    this_coroutine = NULL;
-    this_thread = NULL;
-    preemption_state.enabled = false;
-    preemption_state.disable_count = 1;
+    TL_SET( this_processor, proc );
+    TL_SET( this_coroutine, NULL );
+    TL_SET( this_thread, NULL );
+    TL_GET( preemption_state ).enabled = false;
+    TL_GET( preemption_state ).disable_count = 1;
     // SKULLDUGGERY: We want to create a context for the processor coroutine
     // which is needed for the 2-step context switch. However, there is no reason
…
     //Set global state
-    this_coroutine = get_coroutine(proc->runner);
-    this_thread = NULL;
+    TL_SET( this_coroutine, get_coroutine(proc->runner) );
+    TL_SET( this_thread, NULL );

     //We now have a proper context from which to schedule threads
…
 void kernel_first_resume(processor * this) {
-    coroutine_desc * src = this_coroutine;
+    coroutine_desc * src = TL_GET( this_coroutine );
     coroutine_desc * dst = get_coroutine(this->runner);

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     create_stack(&dst->stack, dst->stack.size);
     CtxStart(&this->runner, CtxInvokeCoroutine);

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     dst->last = src;
…
     // set new coroutine that task is executing
-    this_coroutine = dst;
+    TL_SET( this_coroutine, dst );

     // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
…
     src->state = Active;

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
 }
…
 // Scheduler routines
 void ScheduleThread( thread_desc * thrd ) {
-    // if( ! thrd ) return;
+    // if( ! thrd ) return;
     verify( thrd );
     verify( thrd->self_cor.state != Halted );

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

-    with( *this_processor->cltr ) {
+    with( *TL_GET( this_processor )->cltr ) {
         lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
         append( ready_queue, thrd );
…
     }

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
 }

 thread_desc * nextThread(cluster * this) with( *this ) {
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     lock( ready_queue_lock __cfaabi_dbg_ctx2 );
     thread_desc * head = pop_head( ready_queue );
     unlock( ready_queue_lock );
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     return head;
 }
…
 void BlockInternal() {
     disable_interrupts();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     returnToKernel();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     enable_interrupts( __cfaabi_dbg_ctx );
 }
…
 void BlockInternal( __spinlock_t * lock ) {
     disable_interrupts();
-    this_processor->finish.action_code = Release;
-    this_processor->finish.lock = lock;
+    TL_GET( this_processor )->finish.action_code = Release;
+    TL_GET( this_processor )->finish.lock = lock;

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     returnToKernel();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     enable_interrupts( __cfaabi_dbg_ctx );
…
 void BlockInternal( thread_desc * thrd ) {
     disable_interrupts();
-    this_processor->finish.action_code = Schedule;
-    this_processor->finish.thrd = thrd;
+    TL_GET( this_processor )->finish.action_code = Schedule;
+    TL_GET( this_processor )->finish.thrd = thrd;

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     returnToKernel();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     enable_interrupts( __cfaabi_dbg_ctx );
…
     assert(thrd);
     disable_interrupts();
-    this_processor->finish.action_code = Release_Schedule;
-    this_processor->finish.lock = lock;
-    this_processor->finish.thrd = thrd;
+    TL_GET( this_processor )->finish.action_code = Release_Schedule;
+    TL_GET( this_processor )->finish.lock = lock;
+    TL_GET( this_processor )->finish.thrd = thrd;

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     returnToKernel();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     enable_interrupts( __cfaabi_dbg_ctx );
…
 void BlockInternal(__spinlock_t * locks [], unsigned short count) {
     disable_interrupts();
-    this_processor->finish.action_code = Release_Multi;
-    this_processor->finish.locks = locks;
-    this_processor->finish.lock_count = count;
+    TL_GET( this_processor )->finish.action_code = Release_Multi;
+    TL_GET( this_processor )->finish.locks = locks;
+    TL_GET( this_processor )->finish.lock_count = count;

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     returnToKernel();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     enable_interrupts( __cfaabi_dbg_ctx );
…
 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
     disable_interrupts();
-    this_processor->finish.action_code = Release_Multi_Schedule;
-    this_processor->finish.locks = locks;
-    this_processor->finish.lock_count = lock_count;
-    this_processor->finish.thrds = thrds;
-    this_processor->finish.thrd_count = thrd_count;
+    TL_GET( this_processor )->finish.action_code = Release_Multi_Schedule;
+    TL_GET( this_processor )->finish.locks = locks;
+    TL_GET( this_processor )->finish.lock_count = lock_count;
+    TL_GET( this_processor )->finish.thrds = thrds;
+    TL_GET( this_processor )->finish.thrd_count = thrd_count;

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     returnToKernel();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     enable_interrupts( __cfaabi_dbg_ctx );
…
 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-    verify( ! preemption_state.enabled );
-    this_processor->finish.action_code = thrd ? Release_Schedule : Release;
-    this_processor->finish.lock = lock;
-    this_processor->finish.thrd = thrd;
+    verify( ! TL_GET( preemption_state ).enabled );
+    TL_GET( this_processor )->finish.action_code = thrd ? Release_Schedule : Release;
+    TL_GET( this_processor )->finish.lock = lock;
+    TL_GET( this_processor )->finish.thrd = thrd;

     returnToKernel();
…
 // Kernel boot procedures
 void kernel_startup(void) {
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     __cfaabi_dbg_print_safe("Kernel : Starting\n");
…
     //initialize the global state variables
-    this_processor = mainProcessor;
-    this_thread = mainThread;
-    this_coroutine = &mainThread->self_cor;
+    TL_SET( this_processor, mainProcessor );
+    TL_SET( this_thread, mainThread );
+    TL_SET( this_coroutine, &mainThread->self_cor );

     // Enable preemption
…
     // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
     // mainThread is on the ready queue when this call is made.
-    kernel_first_resume( this_processor );
+    kernel_first_resume( TL_GET( this_processor ) );
…
     __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );
     enable_interrupts( __cfaabi_dbg_ctx );
-    verify( preemption_state.enabled );
+    verify( TL_GET( preemption_state ).enabled );
 }
…
     __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

-    verify( preemption_state.enabled );
+    verify( TL_GET( preemption_state ).enabled );
     disable_interrupts();
-    verify( ! preemption_state.enabled );
+    verify( ! TL_GET( preemption_state ).enabled );

     // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
…
     // first task to abort ?
-    if ( ! kernel_abort_called ) {        // not first task to abort ?
+    if ( ! kernel_abort_called ) {        // not first task to abort ?
         kernel_abort_called = true;
         unlock( kernel_abort_lock );
…
     }

-    return this_thread;
+    return TL_GET( this_thread );
 }
…
     __cfaabi_dbg_bits_write( abort_text, len );

-    if ( thrd != this_coroutine ) {
-        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
+    if ( get_coroutine(thrd) != TL_GET( this_coroutine ) ) {
+        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
         __cfaabi_dbg_bits_write( abort_text, len );
     }
…
 int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
-    return get_coroutine(this_thread) == get_coroutine(mainThread) ? 4 : 2;
+    return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
 }
…
     if ( count < 0 ) {
         // queue current task
-        append( waiting, (thread_desc *)this_thread );
+        append( waiting, (thread_desc *)TL_GET( this_thread ) );

         // atomically release spin lock and block
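The churn in this changeset is almost entirely mechanical: the three standalone thread-local pointers (this_coroutine, this_thread, this_processor) and the preemption state are folded into the single thread-local kernelThreadData struct, and every access is rewritten to go through TL_GET / TL_SET. Those macros and the KernelThreadData layout live in kernel_private.h, which is not part of this diff; the sketch below is only an assumption of their shape, with guessed field names taken from the old globals, inferred from how the macros are used above (TL_GET must yield an lvalue, since the kernel both reads through it and assigns to it):

// Hypothetical sketch only -- not the actual kernel_private.h contents.
// Types (coroutine_desc, thread_desc, processor, __cfa_kernel_preemption_state_t)
// are the ones already declared by the kernel headers.
struct KernelThreadData {
    coroutine_desc * volatile this_coroutine;      // formerly a standalone thread_local
    thread_desc    * volatile this_thread;         // formerly a standalone thread_local
    processor      * volatile this_processor;      // formerly a standalone thread_local
    __cfa_kernel_preemption_state_t preemption_state;
};

extern thread_local struct KernelThreadData kernelThreadData;

// Assumed expansions: TL_GET produces an lvalue, so reads such as
// TL_GET( this_processor )->runner and writes such as
// TL_GET( preemption_state ).enabled = false both work unchanged.
#define TL_GET( member )        kernelThreadData.member
#define TL_SET( member, value ) kernelThreadData.member = (value)

Under that assumed layout, the initializer shown in the diff, { NULL, NULL, NULL, { 1, false, false } }, corresponds to the three pointers followed by the preemption state; the nested initializer also suggests the preemption-state field order changed, since the old standalone initializer was { false, false, 1 }.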