Changeset 28f3a19 for src/libcfa/concurrency/kernel.c
- Timestamp: Jun 27, 2018, 3:28:41 PM (7 years ago)
- Branches: new-env, with_gc
- Children: b21c77a
- Parents: 0182bfa (diff), 63238a4 (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Files: 1 edited

Legend:
- Unmodified
- Added
- Removed
src/libcfa/concurrency/kernel.c
--- src/libcfa/concurrency/kernel.c (r0182bfa)
+++ src/libcfa/concurrency/kernel.c (r28f3a19)
@@ -16,4 +16,6 @@
 // C Includes
 #include <stddef.h>
+#include <errno.h>
+#include <string.h>
 extern "C" {
 	#include <stdio.h>
@@ -49,5 +51,7 @@
 thread_desc * mainThread;
 
-struct { __dllist_t(cluster) list; __spinlock_t lock; } global_clusters;
+extern "C" {
+	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
+}
 
 //-----------------------------------------------------------------------------
@@ -143,17 +147,21 @@
 	runner.proc = &this;
 
+	idleLock{};
+
 	start( &this );
 }
 
 void ^?{}(processor & this) with( this ){
-	if( ! do_terminate ) {
+	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
 		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
-		terminate(&this);
-		verify(this.do_terminate);
-		verify( kernelTLS.this_processor != &this);
+
+		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
+		wake( &this );
+
 		P( terminated );
 		verify( kernelTLS.this_processor != &this);
-		pthread_join( kernel_thread, NULL );
 	}
+
+	pthread_join( kernel_thread, NULL );
 }
 
@@ -194,5 +202,5 @@
 
 	thread_desc * readyThread = NULL;
-	for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
+	for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
 	{
 		readyThread = nextThread( this->cltr );
@@ -213,5 +221,6 @@
 		else
 		{
-			spin(this, &spin_count);
+			// spin(this, &spin_count);
+			halt(this);
 		}
 	}
@@ -257,23 +266,20 @@
 // its final actions must be executed from the kernel
 void finishRunning(processor * this) with( this->finish ) {
-	if( action_code == Release ) {
-		verify( ! kernelTLS.preemption_state.enabled );
+	verify( ! kernelTLS.preemption_state.enabled );
+	choose( action_code ) {
+	case No_Action:
+		break;
+	case Release:
 		unlock( *lock );
-	}
-	else if( action_code == Schedule ) {
+	case Schedule:
 		ScheduleThread( thrd );
-	}
-	else if( action_code == Release_Schedule ) {
-		verify( ! kernelTLS.preemption_state.enabled );
+	case Release_Schedule:
 		unlock( *lock );
 		ScheduleThread( thrd );
-	}
-	else if( action_code == Release_Multi ) {
-		verify( ! kernelTLS.preemption_state.enabled );
+	case Release_Multi:
 		for(int i = 0; i < lock_count; i++) {
 			unlock( *locks[i] );
 		}
-	}
-	else if( action_code == Release_Multi_Schedule ) {
+	case Release_Multi_Schedule:
 		for(int i = 0; i < lock_count; i++) {
 			unlock( *locks[i] );
@@ -282,14 +288,9 @@
 			ScheduleThread( thrds[i] );
 		}
-	}
-	else {
-		assert(action_code == No_Action);
-	}
-}
-
-// Handles spinning logic
-// TODO : find some strategy to put cores to sleep after some time
-void spin(processor * this, unsigned int * spin_count) {
-	(*spin_count)++;
+	case Callback:
+		callback();
+	default:
+		abort("KERNEL ERROR: Unexpected action to run after thread");
+	}
 }
 
@@ -396,6 +397,19 @@
 	with( *thrd->curr_cluster ) {
 		lock( ready_queue_lock __cfaabi_dbg_ctx2 );
+		bool was_empty = !(ready_queue != 0);
 		append( ready_queue, thrd );
 		unlock( ready_queue_lock );
+
+		if(was_empty) {
+			lock(proc_list_lock __cfaabi_dbg_ctx2);
+			if(idles) {
+				wake_fast(idles.head);
+			}
+			unlock(proc_list_lock);
+		}
+		else if( struct processor * idle = idles.head ) {
+			wake_fast(idle);
+		}
+
 	}
 
@@ -497,4 +511,18 @@
 }
 
+void BlockInternal(__finish_callback_fptr_t callback) {
+	disable_interrupts();
+	with( *kernelTLS.this_processor ) {
+		finish.action_code = Callback;
+		finish.callback = callback;
+	}
+
+	verify( ! kernelTLS.preemption_state.enabled );
+	returnToKernel();
+	verify( ! kernelTLS.preemption_state.enabled );
+
+	enable_interrupts( __cfaabi_dbg_ctx );
+}
+
 // KERNEL ONLY
 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
@@ -518,6 +546,6 @@
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");
 
-	global_clusters.list{ __get };
-	global_clusters.lock{};
+	__cfa_dbg_global_clusters.list{ __get };
+	__cfa_dbg_global_clusters.lock{};
 
 	// Initialize the main cluster
@@ -600,6 +628,7 @@
 	// When its coroutine terminates, it return control to the mainThread
 	// which is currently here
-	mainProcessor->do_terminate = true;
+	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
 	returnToKernel();
+	mainThread->self_cor.state = Halted;
 
 	// THE SYSTEM IS NOW COMPLETELY STOPPED
@@ -617,6 +646,6 @@
 	^(mainThread){};
 
-	^(global_clusters.list){};
-	^(global_clusters.lock){};
+	^(__cfa_dbg_global_clusters.list){};
+	^(__cfa_dbg_global_clusters.lock){};
 
 	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
@@ -627,21 +656,27 @@
 //=============================================================================================
 
-// void halt(processor * this) with( this ) {
-// 	pthread_mutex_lock( &idle.lock );
-
-
-
-// 	// SKULLDUGGERY: Even if spurious wake-up is a thing
-// 	// spuriously waking up a kernel thread is not a big deal
-// 	// if it is very rare.
-// 	pthread_cond_wait( &idle.cond, &idle.lock);
-// 	pthread_mutex_unlock( &idle.lock );
-// }
-
-// void wake(processor * this) with( this ) {
-// 	pthread_mutex_lock  (&idle.lock);
-// 	pthread_cond_signal (&idle.cond);
-// 	pthread_mutex_unlock(&idle.lock);
-// }
+void halt(processor * this) with( *this ) {
+	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
+
+	with( *cltr ) {
+		lock(proc_list_lock __cfaabi_dbg_ctx2);
+		remove(procs, *this);
+		push_front(idles, *this);
+		unlock(proc_list_lock);
+	}
+
+	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
+
+	wait( idleLock );
+
+	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
+
+	with( *cltr ) {
+		lock(proc_list_lock __cfaabi_dbg_ctx2);
+		remove(idles, *this);
+		push_front(procs, *this);
+		unlock(proc_list_lock);
+	}
+}
 
 //=============================================================================================
@@ -758,13 +793,13 @@
 // Global Queues
 void doregister( cluster & cltr ) {
-	lock( global_clusters.lock __cfaabi_dbg_ctx2);
-	push_front( global_clusters.list, cltr );
-	unlock( global_clusters.lock );
+	lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
+	push_front( __cfa_dbg_global_clusters.list, cltr );
+	unlock( __cfa_dbg_global_clusters.lock );
}
 
 void unregister( cluster & cltr ) {
-	lock( global_clusters.lock __cfaabi_dbg_ctx2);
-	remove( global_clusters.list, cltr );
-	unlock( global_clusters.lock );
+	lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
+	remove( __cfa_dbg_global_clusters.list, cltr );
+	unlock( __cfa_dbg_global_clusters.lock );
 }
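The recurring change in this merge is that every access to a processor's do_terminate flag now goes through GCC's __atomic builtins instead of a plain read or write, making the shutdown handshake between the processor destructor and its kernel thread an explicit release/acquire pairing rather than a data race. Below is a minimal plain-C sketch of that pattern; the names are stand-ins, not the CFA runtime API, and the real destructor also wakes the processor before joining.

// Sketch: shutdown flag published with an atomic store, polled with an
// atomic load, as in the rewritten scheduler loop and ^?{}(processor &).
// Build with: gcc -pthread terminate.c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool do_terminate = false;

static void * kernel_main(void * arg) {
	(void)arg;
	// Poll the flag the same way the new for-loop condition does.
	while ( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ) {
		// ... run ready threads, or halt() when the queue is empty ...
	}
	return NULL;
}

int main(void) {
	pthread_t kernel_thread;
	pthread_create(&kernel_thread, NULL, kernel_main, NULL);

	// Shutdown path: publish the flag, then join the kernel thread.
	__atomic_store_n(&do_terminate, true, __ATOMIC_RELEASE);
	pthread_join(kernel_thread, NULL);
	puts("terminated");
	return 0;
}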
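finishRunning is also rewritten from an if/else chain into CFA's choose statement. Unlike a C switch, choose ends every case with an implicit break, which is why the cases in the diff carry no break statements and why Release_Schedule must spell out both the unlock and the schedule rather than falling through. The new Callback case pairs with the BlockInternal(__finish_callback_fptr_t) overload added further down, letting a blocking thread run an arbitrary function from the kernel side of the context switch. A C switch analogue with the breaks written out (a sketch; the enum and the printed actions are stand-ins for this->finish):

// Sketch: C equivalent of the choose()-based dispatch above.
#include <stdio.h>
#include <stdlib.h>

enum FinishAction { No_Action, Release, Schedule, Release_Schedule, Callback };

static void finish_running(enum FinishAction action_code) {
	switch (action_code) {
	case No_Action:
		break;
	case Release:
		puts("unlock( *lock )");
		break;                        // implicit at the end of a CFA choose case
	case Schedule:
		puts("ScheduleThread( thrd )");
		break;
	case Release_Schedule:
		puts("unlock( *lock )");
		puts("ScheduleThread( thrd )");
		break;
	case Callback:
		puts("callback()");
		break;
	default:
		abort();                      // "KERNEL ERROR: Unexpected action to run after thread"
	}
}

int main(void) {
	finish_running(Release_Schedule); // prints the unlock, then the schedule
	return 0;
}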
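Finally, the merge replaces the old spin(...) placeholder with a real sleep/wake protocol: an idle processor moves itself from the cluster's procs list to its idles list and blocks on its idleLock, and ScheduleThread wakes a sleeper when it enqueues work. Using a semaphore rather than a bare condition variable means a wake posted before the sleeper actually blocks is never lost, and a spurious wake-up merely sends the processor around the scheduler loop again. A self-contained C sketch of that protocol, with a POSIX semaphore standing in for idleLock (hypothetical names; the CFA runtime wraps this in wait/wake_fast, and the real code also distinguishes the was_empty path from waking idles.head directly):

// Sketch: idle processor parks on a semaphore; the scheduler posts it
// when work arrives on a previously empty queue.
// Build with: gcc -pthread idle.c
#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t idle_lock;                  // stands in for processor.idleLock
static bool work_available = false;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

// halt(): park the calling processor until someone posts the semaphore.
static void halt_processor(void) {
	printf("processor ready to sleep\n");
	sem_wait(&idle_lock);                // wait( idleLock );
	printf("processor woke up and ready to run\n");
}

// ScheduleThread(): enqueue work, then wake a sleeper if the queue was empty.
static void schedule_thread(void) {
	pthread_mutex_lock(&queue_lock);
	bool was_empty = !work_available;    // mirrors the was_empty test above
	work_available = true;
	pthread_mutex_unlock(&queue_lock);

	if (was_empty)
		sem_post(&idle_lock);            // wake_fast(idles.head);
}

static void * processor_main(void * arg) {
	(void)arg;
	halt_processor();                    // no ready thread: go idle
	return NULL;
}

int main(void) {
	sem_init(&idle_lock, 0, 0);
	pthread_t p;
	pthread_create(&p, NULL, processor_main, NULL);
	schedule_thread();                   // enqueue work; wakes the idle processor
	pthread_join(p, NULL);
	sem_destroy(&idle_lock);
	return 0;
}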