File: 1 edited
src/libcfa/concurrency/kernel.c (modified) (14 diffs)
Legend: unmodified context lines are prefixed with a space, added lines (ra1a17a74) with +, removed lines (r85b1deb) with -.
src/libcfa/concurrency/kernel.c
--- src/libcfa/concurrency/kernel.c (r85b1deb)
+++ src/libcfa/concurrency/kernel.c (ra1a17a74)

@@ -16 +16 @@
 //C Includes
 #include <stddef.h>
-#include <errno.h>
-#include <string.h>
 extern "C" {
 #include <stdio.h>

@@ -51 +49 @@
 thread_desc * mainThread;

-extern "C" {
-	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
-}
+struct { __dllist_t(cluster ) list; __spinlock_t lock; } global_clusters;

@@ -147 +143 @@
 	runner.proc = &this;

-	idleLock{};
-
 	start( &this );
 }

 void ^?{}(processor & this) with( this ){
-	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE)) {
+	if( ! do_terminate ) {
 		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
-
-		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
-		wake( &this );
-
+		terminate(&this);
+		verify(this.do_terminate);
+		verify( kernelTLS.this_processor != &this);
 		P( terminated );
 		verify( kernelTLS.this_processor != &this);
-	}
-
-	pthread_join( kernel_thread, NULL );
+		pthread_join( kernel_thread, NULL );
+	}
 }

@@ -202 +194 @@

 	thread_desc * readyThread = NULL;
-	for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
+	for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
 	{
 		readyThread = nextThread( this->cltr );

@@ -221 +213 @@
 		else
 		{
-			// spin(this, &spin_count);
-			halt(this);
+			spin(this, &spin_count);
 		}
 	}

@@ -266 +257 @@
 // its final actions must be executed from the kernel
 void finishRunning(processor * this) with( this->finish ) {
-	verify( ! kernelTLS.preemption_state.enabled );
-	choose( action_code ) {
-	case No_Action:
-		break;
-	case Release:
+	if( action_code == Release ) {
+		verify( ! kernelTLS.preemption_state.enabled );
 		unlock( *lock );
-	case Schedule:
+	}
+	else if( action_code == Schedule ) {
 		ScheduleThread( thrd );
-	case Release_Schedule:
+	}
+	else if( action_code == Release_Schedule ) {
+		verify( ! kernelTLS.preemption_state.enabled );
 		unlock( *lock );
 		ScheduleThread( thrd );
-	case Release_Multi:
+	}
+	else if( action_code == Release_Multi ) {
+		verify( ! kernelTLS.preemption_state.enabled );
 		for(int i = 0; i < lock_count; i++) {
 			unlock( *locks[i] );
 		}
-	case Release_Multi_Schedule:
+	}
+	else if( action_code == Release_Multi_Schedule ) {
 		for(int i = 0; i < lock_count; i++) {
 			unlock( *locks[i] );

@@ -288 +282 @@
 			ScheduleThread( thrds[i] );
 		}
-	case Callback:
-		callback();
-	default:
-		abort("KERNEL ERROR: Unexpected action to run after thread");
-	}
+	}
+	else {
+		assert(action_code == No_Action);
+	}
+}
+
+// Handles spinning logic
+// TODO : find some strategy to put cores to sleep after some time
+void spin(processor * this, unsigned int * spin_count) {
+	(*spin_count)++;
 }
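The finishRunning() hunk above replaces a CFA choose statement over the finish action codes with an explicit if/else chain; either way, these clean-up actions run on the kernel side of the context switch, after the previous thread has already left its stack, so releasing its lock or re-queueing it cannot race with it. Below is a minimal plain-C sketch of that deferred-action dispatch, not the Cforall sources: the enum constants and field names mirror the diff, while spinlock_t, unlock(), schedule_thread(), finish_info and finish_running() are simplified stand-ins.

/* Minimal sketch of the deferred-action dispatch shown above (plain C).
 * spinlock_t, unlock(), schedule_thread() are illustrative stand-ins. */
#include <assert.h>
#include <stdatomic.h>

typedef atomic_flag spinlock_t;
struct thread_desc;                                /* opaque user thread */

enum finish_action {
	No_Action, Release, Schedule, Release_Schedule,
	Release_Multi, Release_Multi_Schedule
};

struct finish_info {
	enum finish_action action_code;
	spinlock_t * lock;                             /* single-lock actions */
	struct thread_desc * thrd;                     /* single-thread actions */
	spinlock_t ** locks;          int lock_count;  /* multi-lock actions */
	struct thread_desc ** thrds;  int thrd_count;
};

static void unlock( spinlock_t * l ) { atomic_flag_clear( l ); }
static void schedule_thread( struct thread_desc * t ) { (void)t; /* append to ready queue */ }

/* Runs on the kernel stack, after the previous thread has context-switched
 * away, so dropping its lock or re-queueing it here cannot race with it. */
static void finish_running( struct finish_info * f ) {
	if( f->action_code == Release ) {
		unlock( f->lock );
	}
	else if( f->action_code == Schedule ) {
		schedule_thread( f->thrd );
	}
	else if( f->action_code == Release_Schedule ) {
		unlock( f->lock );
		schedule_thread( f->thrd );
	}
	else if( f->action_code == Release_Multi ) {
		for( int i = 0; i < f->lock_count; i++ ) unlock( f->locks[i] );
	}
	else if( f->action_code == Release_Multi_Schedule ) {
		for( int i = 0; i < f->lock_count; i++ ) unlock( f->locks[i] );
		for( int i = 0; i < f->thrd_count; i++ ) schedule_thread( f->thrds[i] );
	}
	else {
		assert( f->action_code == No_Action );
	}
}

The sketch follows the ra1a17a74 if/else shape; the removed r85b1deb version expresses the same dispatch with a CFA choose statement and adds Callback and default arms.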
@@ -397 +396 @@
 	with( *thrd->curr_cluster ) {
 		lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
-		bool was_empty = !(ready_queue != 0);
 		append( ready_queue, thrd );
 		unlock( ready_queue_lock );
-
-		if(was_empty) {
-			lock      (proc_list_lock __cfaabi_dbg_ctx2);
-			if(idles) {
-				wake_fast(idles.head);
-			}
-			unlock    (proc_list_lock);
-		}
-		else if( struct processor * idle = idles.head ) {
-			wake_fast(idle);
-		}
-
 	}

@@ -511 +497 @@
 }

-void BlockInternal(__finish_callback_fptr_t callback) {
-	disable_interrupts();
-	with( *kernelTLS.this_processor ) {
-		finish.action_code = Callback;
-		finish.callback = callback;
-	}
-
-	verify( ! kernelTLS.preemption_state.enabled );
-	returnToKernel();
-	verify( ! kernelTLS.preemption_state.enabled );
-
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
 // KERNEL ONLY
 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {

@@ -546 +518 @@
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");

-	__cfa_dbg_global_clusters.list{ __get };
-	__cfa_dbg_global_clusters.lock{};
+	global_clusters.list{ __get };
+	global_clusters.lock{};

 	// Initialize the main cluster

@@ -628 +600 @@
 	// When its coroutine terminates, it return control to the mainThread
 	// which is currently here
-	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
+	mainProcessor->do_terminate = true;
 	returnToKernel();
-	mainThread->self_cor.state = Halted;

 	// THE SYSTEM IS NOW COMPLETELY STOPPED

@@ -646 +617 @@
 	^(mainThread){};

-	^( __cfa_dbg_global_clusters.list){};
-	^( __cfa_dbg_global_clusters.lock){};
+	^(global_clusters.list){};
+	^(global_clusters.lock){};

 	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
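Several hunks above also change how do_terminate is handled: the r85b1deb side reads the flag with __atomic_load_n (acquire or seq_cst) and writes it with __atomic_store_n (relaxed or release), while the ra1a17a74 side uses plain loads and stores. As a reference point only, here is a minimal plain-C sketch (not the Cforall sources) of the acquire/release pairing the explicit GCC/Clang __atomic builtins express, with a worker thread polling the flag and the shutting-down thread publishing it before joining; the names and the worker body are illustrative.

/* Minimal sketch: acquire/release handshake on a termination flag (plain C). */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool do_terminate = false;

static void * worker( void * arg ) {
	(void)arg;
	/* poll the flag with acquire semantics, like the old runner loop */
	while( ! __atomic_load_n( &do_terminate, __ATOMIC_ACQUIRE ) ) {
		/* ... find ready threads, spin, or block ... */
	}
	return NULL;
}

int main( void ) {
	pthread_t kernel_thread;
	pthread_create( &kernel_thread, NULL, worker, NULL );

	/* publish termination with release semantics before joining */
	__atomic_store_n( &do_terminate, true, __ATOMIC_RELEASE );
	pthread_join( kernel_thread, NULL );
	printf( "worker terminated\n" );
	return 0;
}

Whether plain accesses are sufficient on the ra1a17a74 side depends on what other synchronization (the P( terminated ) / pthread_join hand-off in the destructor hunk) already orders these reads and writes; the sketch only shows the explicit-atomics variant.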
@@ -656 +627 @@
 //=============================================================================================

-void halt(processor * this) with( *this ) {
-	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
-
-	with( *cltr ) {
-		lock      (proc_list_lock __cfaabi_dbg_ctx2);
-		remove    (procs, *this);
-		push_front(idles, *this);
-		unlock    (proc_list_lock);
-	}
-
-	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
-
-	wait( idleLock );
-
-	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
-
-	with( *cltr ) {
-		lock      (proc_list_lock __cfaabi_dbg_ctx2);
-		remove    (idles, *this);
-		push_front(procs, *this);
-		unlock    (proc_list_lock);
-	}
-}
+// void halt(processor * this) with( this ) {
+// 	pthread_mutex_lock( &idle.lock );
+
+
+
+// 	// SKULLDUGGERY: Even if spurious wake-up is a thing
+// 	// spuriously waking up a kernel thread is not a big deal
+// 	// if it is very rare.
+// 	pthread_cond_wait( &idle.cond, &idle.lock);
+// 	pthread_mutex_unlock( &idle.lock );
+// }
+
+// void wake(processor * this) with( this ) {
+// 	pthread_mutex_lock  (&idle.lock);
+// 	pthread_cond_signal (&idle.cond);
+// 	pthread_mutex_unlock(&idle.lock);
+// }

@@ -793 +758 @@
 // Global Queues
 void doregister( cluster & cltr ) {
-	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
-	push_front( __cfa_dbg_global_clusters.list, cltr );
-	unlock    ( __cfa_dbg_global_clusters.lock );
+	lock      ( global_clusters.lock __cfaabi_dbg_ctx2);
+	push_front( global_clusters.list, cltr );
+	unlock    ( global_clusters.lock );
 }

 void unregister( cluster & cltr ) {
-	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
-	remove( __cfa_dbg_global_clusters.list, cltr );
-	unlock( __cfa_dbg_global_clusters.lock );
+	lock  ( global_clusters.lock __cfaabi_dbg_ctx2);
+	remove( global_clusters.list, cltr );
+	unlock( global_clusters.lock );
 }
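The halt()/wake() pair removed above, and the pthread-based version left commented out on the ra1a17a74 side, park an idle processor until new work arrives; the commented code tolerates spurious wake-ups, per its SKULLDUGGERY note. The conventional shape of that mechanism, with a predicate guarding the wait, looks roughly like the following plain-C sketch; the idle_park struct and wake_pending flag are illustrative names, not the kernel's.

/* Minimal sketch: mutex/condition-variable park-and-wake (plain C). */
#include <pthread.h>
#include <stdbool.h>

struct idle_park {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            wake_pending;   /* predicate guarding against spurious wake-ups */
};

static struct idle_park idle = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false };

/* park the calling processor until wake() is called */
static void halt( struct idle_park * this ) {
	pthread_mutex_lock( &this->lock );
	while( ! this->wake_pending )                  /* re-check after every wake-up */
		pthread_cond_wait( &this->cond, &this->lock );
	this->wake_pending = false;
	pthread_mutex_unlock( &this->lock );
}

/* wake one parked processor */
static void wake( struct idle_park * this ) {
	pthread_mutex_lock( &this->lock );
	this->wake_pending = true;
	pthread_cond_signal( &this->cond );
	pthread_mutex_unlock( &this->lock );
}

A predicate plus while-loop is the portable way to handle both spurious wake-ups and a wake() that arrives before the corresponding halt(); the commented-out version skips the predicate on the argument that rare spurious wake-ups of a kernel thread are harmless.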