Changeset beefc34c for src/libcfa/concurrency
- Timestamp: Jun 7, 2018, 6:12:11 PM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 6eb131c, 7b28e4a
- Parents: 174845e (diff), 85b1deb (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: src/libcfa/concurrency
- Files: 4 edited
src/libcfa/concurrency/kernel
```diff
--- src/libcfa/concurrency/kernel (r174845e)
+++ src/libcfa/concurrency/kernel (rbeefc34c)
 	pthread_t kernel_thread;
 
+	// RunThread data
+	// Action to do after a thread is ran
+	struct FinishAction finish;
+
+	// Preemption data
+	// Node which is added in the discrete event simulaiton
+	struct alarm_node_t * preemption_alarm;
+
+	// If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
+	bool pending_preemption;
+
+	// Idle lock
+	__bin_sem_t idleLock;
+
 	// Termination
 	// Set to true to notify the processor should terminate
…
 	semaphore terminated;
 
-	// RunThread data
-	// Action to do after a thread is ran
-	struct FinishAction finish;
-
-	// Preemption data
-	// Node which is added in the discrete event simulaiton
-	struct alarm_node_t * preemption_alarm;
-
-	// If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
-	bool pending_preemption;
-
-	// Idle lock
-	sem_t idleLock;
-
 	// Link lists fields
-	struct {
+	struct __dbg_node_proc {
 		struct processor * next;
 		struct processor * prev;
…
 
 	// Link lists fields
-	struct {
+	struct __dbg_node_cltr {
 		cluster * next;
 		cluster * prev;
```
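Two things change in this header: the processor's idle lock becomes a runtime-provided `__bin_sem_t` instead of a bare POSIX `sem_t`, and the run-thread/preemption/idle fields move ahead of the termination fields. The changeset does not show `__bin_sem_t` itself; the following is a minimal plain-C sketch of such a binary semaphore (a mutex/condition pair where repeated posts collapse into a single pending wake-up), offered only as an illustration of the concept, not the CFA runtime's actual definition:

```c
// Hypothetical sketch of a binary semaphore in the style of __bin_sem_t;
// the real CFA type is not part of this diff.
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            signalled;   // at most one pending wake-up
} bin_sem_t;

static void bin_sem_init(bin_sem_t * s) {
	pthread_mutex_init(&s->lock, NULL);
	pthread_cond_init (&s->cond, NULL);
	s->signalled = false;
}

// wait: block until the semaphore has been posted, then consume the post
static void bin_sem_wait(bin_sem_t * s) {
	pthread_mutex_lock(&s->lock);
	while( !s->signalled ) pthread_cond_wait(&s->cond, &s->lock);
	s->signalled = false;
	pthread_mutex_unlock(&s->lock);
}

// post: wake at most one waiter; extra posts collapse into one
static void bin_sem_post(bin_sem_t * s) {
	pthread_mutex_lock(&s->lock);
	s->signalled = true;
	pthread_cond_signal(&s->cond);
	pthread_mutex_unlock(&s->lock);
}

int main(void) {
	bin_sem_t s;
	bin_sem_init(&s);
	bin_sem_post(&s);   // two posts ...
	bin_sem_post(&s);   // ... collapse into one pending wake-up
	bin_sem_wait(&s);   // consumed here without blocking
	return 0;
}
```

A binary semaphore suits an idle lock well: a processor is either parked or running, so at most one wake-up can ever be outstanding.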
src/libcfa/concurrency/kernel.c
```diff
--- src/libcfa/concurrency/kernel.c (r174845e)
+++ src/libcfa/concurrency/kernel.c (rbeefc34c)
 //C Includes
 #include <stddef.h>
+#include <errno.h>
+#include <string.h>
 extern "C" {
 #include <stdio.h>
…
 thread_desc * mainThread;
 
-struct { __dllist_t(cluster) list; __spinlock_t lock; } global_clusters;
+extern "C" {
+	struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
+}
 
 //-----------------------------------------------------------------------------
…
 	runner.proc = &this;
 
-	sem_init(&idleLock, 0, 0);
+	idleLock{};
 
 	start( &this );
…
 
 void ^?{}(processor & this) with( this ){
-	if( ! do_terminate ) {
+	if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
 		__cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
-		terminate(&this);
-		verify(this.do_terminate);
-		verify( kernelTLS.this_processor != &this);
+
+		__atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
+		wake( &this );
+
 		P( terminated );
 		verify( kernelTLS.this_processor != &this);
-		pthread_join( kernel_thread, NULL );
-	}
-
-	sem_destroy(&idleLock);
+	}
+
+	pthread_join( kernel_thread, NULL );
 }
…
 
 	thread_desc * readyThread = NULL;
-	for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
+	for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
 	{
 		readyThread = nextThread( this->cltr );
…
 		else
 		{
-			spin(this, &spin_count);
+			// spin(this, &spin_count);
+			halt(this);
 		}
 	}
…
 }
 
-// Handles spinning logic
-// TODO : find some strategy to put cores to sleep after some time
-void spin(processor * this, unsigned int * spin_count) {
-	// (*spin_count)++;
-	halt(this);
-}
-
 // KERNEL_ONLY
 // Context invoker for processors
…
 	unlock( ready_queue_lock );
 
-	if( was_empty ) {
+	if(was_empty) {
 		lock      (proc_list_lock __cfaabi_dbg_ctx2);
 		if(idles) {
-			wake (idles.head);
+			wake_fast(idles.head);
 		}
 		unlock    (proc_list_lock);
 	}
+	else if( struct processor * idle = idles.head ) {
+		wake_fast(idle);
+	}
+
 }
…
 	__cfaabi_dbg_print_safe("Kernel : Starting\n");
 
-	global_clusters.list{ __get };
-	global_clusters.lock{};
+	__cfa_dbg_global_clusters.list{ __get };
+	__cfa_dbg_global_clusters.lock{};
 
 	// Initialize the main cluster
…
 	// When its coroutine terminates, it return control to the mainThread
 	// which is currently here
-	mainProcessor->do_terminate = true;
+	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
 	returnToKernel();
+	mainThread->self_cor.state = Halted;
 
 	// THE SYSTEM IS NOW COMPLETELY STOPPED
…
 	^(mainThread){};
 
-	^(global_clusters.list){};
-	^(global_clusters.lock){};
+	^(__cfa_dbg_global_clusters.list){};
+	^(__cfa_dbg_global_clusters.lock){};
 
 	__cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
…
 
 void halt(processor * this) with( *this ) {
+	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
+
 	with( *cltr ) {
 		lock      (proc_list_lock __cfaabi_dbg_ctx2);
…
 	__cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
 
-	verify( ({int sval = 0; sem_getvalue(&this->idleLock, &sval); sval; }) < 200);
-	int __attribute__((unused)) ret = sem_wait(&idleLock);
-	verify(ret > 0 || errno == EINTR);
+	wait( idleLock );
 
 	__cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
…
 		unlock    (proc_list_lock);
 	}
-}
-
-void wake(processor * this) {
-	__cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);
-	int __attribute__((unused)) ret = sem_post(&this->idleLock);
-	verify(ret > 0 || errno == EINTR);
-	verify( ({int sval = 0; sem_getvalue(&this->idleLock, &sval); sval; }) < 200);
 }
…
 // Global Queues
 void doregister( cluster & cltr ) {
-	lock      ( global_clusters.lock __cfaabi_dbg_ctx2);
-	push_front( global_clusters.list, cltr );
-	unlock    ( global_clusters.lock );
+	lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
+	push_front( __cfa_dbg_global_clusters.list, cltr );
+	unlock    ( __cfa_dbg_global_clusters.lock );
 }
 
 void unregister( cluster & cltr ) {
-	lock  ( global_clusters.lock __cfaabi_dbg_ctx2);
-	remove( global_clusters.list, cltr );
-	unlock( global_clusters.lock );
+	lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
+	remove( __cfa_dbg_global_clusters.list, cltr );
+	unlock( __cfa_dbg_global_clusters.lock );
 }
```
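The recurring edit in kernel.c is the `do_terminate` handshake: plain reads and writes of the flag become `__atomic_load_n`/`__atomic_store_n`, and the destructor now wakes the idle processor before blocking on `P( terminated )`, since a processor parked in `halt()` would otherwise never re-check the flag. A stand-alone C sketch of that store/load pairing (names and types here are illustrative, not the kernel's own):

```c
// One thread publishes do_terminate with an atomic store; the processor's
// run loop polls it with an atomic load.
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static bool do_terminate = false;

static void * processor_main(void * arg) {
	(void)arg;
	// the acquire load pairs with the release store in main()
	while( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
		// schedule ready threads; park in halt() when the run queue is empty
	}
	return NULL;
}

int main(void) {
	pthread_t kernel_thread;
	pthread_create(&kernel_thread, NULL, processor_main, NULL);

	__atomic_store_n(&do_terminate, true, __ATOMIC_RELEASE); // signal shutdown
	pthread_join(kernel_thread, NULL);                       // then reap the OS thread
	printf("processor terminated\n");
	return 0;
}
```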
src/libcfa/concurrency/kernel_private.h
```diff
--- src/libcfa/concurrency/kernel_private.h (r174845e)
+++ src/libcfa/concurrency/kernel_private.h (rbeefc34c)
 void finishRunning(processor * this);
 void halt(processor * this);
-void wake(processor * this);
-void terminate(processor * this);
-void spin(processor * this, unsigned int * spin_count);
+
+static inline void wake_fast(processor * this) {
+	__cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);
+	post( this->idleLock );
+}
+
+static inline void wake(processor * this) {
+	disable_interrupts();
+	wake_fast(this);
+	enable_interrupts( __cfaabi_dbg_ctx );
+}
 
 struct event_kernel_t {
…
 
 extern event_kernel_t * event_kernel;
-
-//extern thread_local coroutine_desc * volatile this_coroutine;
-//extern thread_local thread_desc * volatile this_thread;
-//extern thread_local processor * volatile this_processor;
-
-// extern volatile thread_local bool preemption_in_progress;
-// extern volatile thread_local bool preemption_enabled;
-// extern volatile thread_local unsigned short disable_preempt_count;
 
 struct __cfa_kernel_preemption_state_t {
```
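The wake logic moves here from kernel.c and splits in two: `wake_fast` just posts the idle semaphore, while `wake` brackets the post with `disable_interrupts()`/`enable_interrupts()` so a caller outside the kernel cannot be preempted part-way through the wake-up. Below is a plain-C sketch of the same split; blocking SIGUSR1 (the signal preemption.c delivers via `pthread_sigqueue`) stands in for the CFA runtime's interrupt control, and every name is illustrative:

```c
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>

typedef struct {
	sem_t     idleLock;
	pthread_t kernel_thread;
} processor;

// Fast path: caller guarantees it cannot be preempted during the post.
static inline void wake_fast(processor * p) {
	sem_post(&p->idleLock);
}

// Safe path: mask the preemption signal around the fast path, mirroring
// disable_interrupts()/enable_interrupts() in the changeset.
static inline void wake(processor * p) {
	sigset_t mask, old;
	sigemptyset(&mask);
	sigaddset(&mask, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &mask, &old);   // disable_interrupts()
	wake_fast(p);
	pthread_sigmask(SIG_SETMASK, &old, NULL);  // enable_interrupts()
}

int main(void) {
	processor p;
	sem_init(&p.idleLock, 0, 0);
	wake(&p);               // leaves exactly one pending wake-up
	sem_wait(&p.idleLock);  // a parked processor would consume it here
	sem_destroy(&p.idleLock);
	return 0;
}
```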
src/libcfa/concurrency/preemption.c
```diff
--- src/libcfa/concurrency/preemption.c (r174845e)
+++ src/libcfa/concurrency/preemption.c (rbeefc34c)
 // Created On       : Mon Jun 5 14:20:42 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Mon Apr 9 13:52:39 2018
-// Update Count     : 36
+// Last Modified On : Tue Jun 5 17:35:49 2018
+// Update Count     : 37
 //
…
 	// If there are still alarms pending, reset the timer
 	if( alarms->head ) {
-		__cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
+		__cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
 		Duration delta = alarms->head->alarm - currtime;
 		Duration caped = max(delta, 50`us);
…
 }
 
-// kill wrapper : signal a processor
-void terminate(processor * this) {
-	this->do_terminate = true;
-	wake(this);
-	sigval_t value = { PREEMPT_TERMINATE };
-	pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
-}
-
 // reserved for future use
 static void timeout( thread_desc * this ) {
…
 	choose(sfp->si_value.sival_int) {
 		case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
-		case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
+		case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
 		default:
 			abort( "internal error, signal value is %d", sfp->si_value.sival_int );
…
 }
 
+#ifdef __CFA_WITH_VERIFY__
+bool __cfaabi_dbg_in_kernel() {
+	return !kernelTLS.preemption_state.enabled;
+}
+#endif
+
 // Local Variables: //
 // mode: c //
```
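Alongside deleting the `terminate` wrapper (its job is now done by the atomic `do_terminate` store plus `wake` in kernel.c's destructor), this file fixes its debug format strings: `%lu` is only correct where the field's underlying type is exactly `unsigned long`, while `%ju` prints a 64-bit tick count portably via `uintmax_t`. A minimal stand-alone demonstration (the variable and its value are made up):

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
	uint64_t tv = 1528409531000000000u;  // a nanosecond timestamp, like currtime.tv in the diff
	// %lu breaks where long is 32 bits; %ju with a cast to uintmax_t is always correct
	printf("KERNEL: resetting alarm to %ju.\n", (uintmax_t)tv);
	return 0;
}
```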