Changeset 28f3a19 for src/libcfa/concurrency
- Timestamp:
- Jun 27, 2018, 3:28:41 PM (8 years ago)
- Branches:
- new-env, with_gc
- Children:
- b21c77a
- Parents:
- 0182bfa (diff), 63238a4 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent. - Location:
- src/libcfa/concurrency
- Files:
-
- 2 added
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/invoke.h
r0182bfa r28f3a19 10 10 // Created On : Tue Jan 17 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 22:33:59201813 // Update Count : 3 012 // Last Modified On : Sat May 19 08:23:21 2018 13 // Update Count : 31 14 14 // 15 15 … … 18 18 #include "bits/locks.h" 19 19 20 #define TL_GET( member ) kernelTLS.member21 #define TL_SET( member, value ) kernelTLS.member = value;22 23 20 #ifdef __cforall 24 21 extern "C" { … … 28 25 #ifndef _INVOKE_H_ 29 26 #define _INVOKE_H_ 27 28 #ifdef __ARM_ARCH 29 // function prototypes are only really used by these macros on ARM 30 void disable_global_interrupts(); 31 void enable_global_interrupts(); 32 33 #define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \ 34 disable_global_interrupts(); \ 35 target = kernelTLS.member; \ 36 enable_global_interrupts(); \ 37 target; } ) 38 #define TL_SET( member, value ) disable_global_interrupts(); \ 39 kernelTLS.member = value; \ 40 enable_global_interrupts(); 41 #else 42 #define TL_GET( member ) kernelTLS.member 43 #define TL_SET( member, value ) kernelTLS.member = value; 44 #endif 30 45 31 46 #ifdef __cforall -
src/libcfa/concurrency/kernel
r0182bfa r28f3a19 23 23 extern "C" { 24 24 #include <pthread.h> 25 #include <semaphore.h> 25 26 } 26 27 … … 43 44 extern struct cluster * mainCluster; 44 45 45 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule }; 46 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback }; 47 48 typedef void (*__finish_callback_fptr_t)(void); 46 49 47 50 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI) 48 51 struct FinishAction { 49 52 FinishOpCode action_code; 53 /* 54 // Union of possible actions 55 union { 56 // Option 1 : locks and threads 57 struct { 58 // 1 thread or N thread 59 union { 60 thread_desc * thrd; 61 struct { 62 thread_desc ** thrds; 63 unsigned short thrd_count; 64 }; 65 }; 66 // 1 lock or N lock 67 union { 68 __spinlock_t * lock; 69 struct { 70 __spinlock_t ** locks; 71 unsigned short lock_count; 72 }; 73 }; 74 }; 75 // Option 2 : action pointer 76 __finish_callback_fptr_t callback; 77 }; 78 /*/ 50 79 thread_desc * thrd; 80 thread_desc ** thrds; 81 unsigned short thrd_count; 51 82 __spinlock_t * lock; 52 83 __spinlock_t ** locks; 53 84 unsigned short lock_count; 54 thread_desc ** thrds;55 unsigned short thrd_count;85 __finish_callback_fptr_t callback; 86 //*/ 56 87 }; 57 88 static inline void ?{}(FinishAction & this) { … … 82 113 pthread_t kernel_thread; 83 114 115 // RunThread data 116 // Action to do after a thread is ran 117 struct FinishAction finish; 118 119 // Preemption data 120 // Node which is added in the discrete event simulaiton 121 struct alarm_node_t * preemption_alarm; 122 123 // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible 124 bool pending_preemption; 125 126 // Idle lock 127 __bin_sem_t idleLock; 128 84 129 // Termination 85 130 // Set to true to notify the processor should terminate … … 89 134 semaphore terminated; 90 135 91 // 
RunThread data92 // Action to do after a thread is ran93 struct FinishAction finish;94 95 // Preemption data96 // Node which is added in the discrete event simulaiton97 struct alarm_node_t * preemption_alarm;98 99 // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible100 bool pending_preemption;101 102 // Idle lock103 104 136 // Link lists fields 105 struct {137 struct __dbg_node_proc { 106 138 struct processor * next; 107 139 struct processor * prev; … … 150 182 151 183 // Link lists fields 152 struct {184 struct __dbg_node_cltr { 153 185 cluster * next; 154 186 cluster * prev; -
src/libcfa/concurrency/kernel.c
r0182bfa r28f3a19 16 16 //C Includes 17 17 #include <stddef.h> 18 #include <errno.h> 19 #include <string.h> 18 20 extern "C" { 19 21 #include <stdio.h> … … 49 51 thread_desc * mainThread; 50 52 51 struct { __dllist_t(cluster ) list; __spinlock_t lock; } global_clusters; 53 extern "C" { 54 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters; 55 } 52 56 53 57 //----------------------------------------------------------------------------- … … 143 147 runner.proc = &this; 144 148 149 idleLock{}; 150 145 151 start( &this ); 146 152 } 147 153 148 154 void ^?{}(processor & this) with( this ){ 149 if( ! do_terminate) {155 if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) { 150 156 __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this); 151 terminate(&this); 152 verify(this.do_terminate); 153 verify( kernelTLS.this_processor != &this); 157 158 __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED); 159 wake( &this ); 160 154 161 P( terminated ); 155 162 verify( kernelTLS.this_processor != &this); 156 pthread_join( kernel_thread, NULL ); 157 } 163 } 164 165 pthread_join( kernel_thread, NULL ); 158 166 } 159 167 … … 194 202 195 203 thread_desc * readyThread = NULL; 196 for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )204 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) 197 205 { 198 206 readyThread = nextThread( this->cltr ); … … 213 221 else 214 222 { 215 spin(this, &spin_count); 223 // spin(this, &spin_count); 224 halt(this); 216 225 } 217 226 } … … 257 266 // its final actions must be executed from the kernel 258 267 void finishRunning(processor * this) with( this->finish ) { 259 if( action_code == Release ) { 260 verify( ! kernelTLS.preemption_state.enabled ); 268 verify( ! 
kernelTLS.preemption_state.enabled ); 269 choose( action_code ) { 270 case No_Action: 271 break; 272 case Release: 261 273 unlock( *lock ); 262 } 263 else if( action_code == Schedule ) { 274 case Schedule: 264 275 ScheduleThread( thrd ); 265 } 266 else if( action_code == Release_Schedule ) { 267 verify( ! kernelTLS.preemption_state.enabled ); 276 case Release_Schedule: 268 277 unlock( *lock ); 269 278 ScheduleThread( thrd ); 270 } 271 else if( action_code == Release_Multi ) { 272 verify( ! kernelTLS.preemption_state.enabled ); 279 case Release_Multi: 273 280 for(int i = 0; i < lock_count; i++) { 274 281 unlock( *locks[i] ); 275 282 } 276 } 277 else if( action_code == Release_Multi_Schedule ) { 283 case Release_Multi_Schedule: 278 284 for(int i = 0; i < lock_count; i++) { 279 285 unlock( *locks[i] ); … … 282 288 ScheduleThread( thrds[i] ); 283 289 } 284 } 285 else { 286 assert(action_code == No_Action); 287 } 288 } 289 290 // Handles spinning logic 291 // TODO : find some strategy to put cores to sleep after some time 292 void spin(processor * this, unsigned int * spin_count) { 293 (*spin_count)++; 290 case Callback: 291 callback(); 292 default: 293 abort("KERNEL ERROR: Unexpected action to run after thread"); 294 } 294 295 } 295 296 … … 396 397 with( *thrd->curr_cluster ) { 397 398 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 399 bool was_empty = !(ready_queue != 0); 398 400 append( ready_queue, thrd ); 399 401 unlock( ready_queue_lock ); 402 403 if(was_empty) { 404 lock (proc_list_lock __cfaabi_dbg_ctx2); 405 if(idles) { 406 wake_fast(idles.head); 407 } 408 unlock (proc_list_lock); 409 } 410 else if( struct processor * idle = idles.head ) { 411 wake_fast(idle); 412 } 413 400 414 } 401 415 … … 497 511 } 498 512 513 void BlockInternal(__finish_callback_fptr_t callback) { 514 disable_interrupts(); 515 with( *kernelTLS.this_processor ) { 516 finish.action_code = Callback; 517 finish.callback = callback; 518 } 519 520 verify( ! 
kernelTLS.preemption_state.enabled ); 521 returnToKernel(); 522 verify( ! kernelTLS.preemption_state.enabled ); 523 524 enable_interrupts( __cfaabi_dbg_ctx ); 525 } 526 499 527 // KERNEL ONLY 500 528 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { … … 518 546 __cfaabi_dbg_print_safe("Kernel : Starting\n"); 519 547 520 global_clusters.list{ __get };521 global_clusters.lock{};548 __cfa_dbg_global_clusters.list{ __get }; 549 __cfa_dbg_global_clusters.lock{}; 522 550 523 551 // Initialize the main cluster … … 600 628 // When its coroutine terminates, it return control to the mainThread 601 629 // which is currently here 602 mainProcessor->do_terminate = true;630 __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE); 603 631 returnToKernel(); 632 mainThread->self_cor.state = Halted; 604 633 605 634 // THE SYSTEM IS NOW COMPLETELY STOPPED … … 617 646 ^(mainThread){}; 618 647 619 ^( global_clusters.list){};620 ^( global_clusters.lock){};648 ^(__cfa_dbg_global_clusters.list){}; 649 ^(__cfa_dbg_global_clusters.lock){}; 621 650 622 651 __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n"); … … 627 656 //============================================================================================= 628 657 629 // void halt(processor * this) with( this ) { 630 // pthread_mutex_lock( &idle.lock ); 631 632 633 634 // // SKULLDUGGERY: Even if spurious wake-up is a thing 635 // // spuriously waking up a kernel thread is not a big deal 636 // // if it is very rare. 637 // pthread_cond_wait( &idle.cond, &idle.lock); 638 // pthread_mutex_unlock( &idle.lock ); 639 // } 640 641 // void wake(processor * this) with( this ) { 642 // pthread_mutex_lock (&idle.lock); 643 // pthread_cond_signal (&idle.cond); 644 // pthread_mutex_unlock(&idle.lock); 645 // } 658 void halt(processor * this) with( *this ) { 659 // verify( ! 
__atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ); 660 661 with( *cltr ) { 662 lock (proc_list_lock __cfaabi_dbg_ctx2); 663 remove (procs, *this); 664 push_front(idles, *this); 665 unlock (proc_list_lock); 666 } 667 668 __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this); 669 670 wait( idleLock ); 671 672 __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this); 673 674 with( *cltr ) { 675 lock (proc_list_lock __cfaabi_dbg_ctx2); 676 remove (idles, *this); 677 push_front(procs, *this); 678 unlock (proc_list_lock); 679 } 680 } 646 681 647 682 //============================================================================================= … … 758 793 // Global Queues 759 794 void doregister( cluster & cltr ) { 760 lock ( global_clusters.lock __cfaabi_dbg_ctx2);761 push_front( global_clusters.list, cltr );762 unlock ( global_clusters.lock );795 lock ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2); 796 push_front( __cfa_dbg_global_clusters.list, cltr ); 797 unlock ( __cfa_dbg_global_clusters.lock ); 763 798 } 764 799 765 800 void unregister( cluster & cltr ) { 766 lock ( global_clusters.lock __cfaabi_dbg_ctx2);767 remove( global_clusters.list, cltr );768 unlock( global_clusters.lock );801 lock ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2); 802 remove( __cfa_dbg_global_clusters.list, cltr ); 803 unlock( __cfa_dbg_global_clusters.lock ); 769 804 } 770 805 -
src/libcfa/concurrency/kernel_private.h
r0182bfa r28f3a19 48 48 void BlockInternal(__spinlock_t * locks [], unsigned short count); 49 49 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count); 50 void BlockInternal(__finish_callback_fptr_t callback); 50 51 void LeaveThread(__spinlock_t * lock, thread_desc * thrd); 51 52 … … 56 57 void runThread(processor * this, thread_desc * dst); 57 58 void finishRunning(processor * this); 58 void terminate(processor * this); 59 void spin(processor * this, unsigned int * spin_count); 59 void halt(processor * this); 60 61 static inline void wake_fast(processor * this) { 62 __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this); 63 post( this->idleLock ); 64 } 65 66 static inline void wake(processor * this) { 67 disable_interrupts(); 68 wake_fast(this); 69 enable_interrupts( __cfaabi_dbg_ctx ); 70 } 60 71 61 72 struct event_kernel_t { … … 65 76 66 77 extern event_kernel_t * event_kernel; 67 68 //extern thread_local coroutine_desc * volatile this_coroutine;69 //extern thread_local thread_desc * volatile this_thread;70 //extern thread_local processor * volatile this_processor;71 72 // extern volatile thread_local bool preemption_in_progress;73 // extern volatile thread_local bool preemption_enabled;74 // extern volatile thread_local unsigned short disable_preempt_count;75 78 76 79 struct __cfa_kernel_preemption_state_t { -
src/libcfa/concurrency/monitor.c
r0182bfa r28f3a19 297 297 this.count = count; 298 298 299 // Sort monitors based on address -> TODO use a sort specialized for small numbers299 // Sort monitors based on address 300 300 __libcfa_small_sort(this.m, count); 301 301 -
src/libcfa/concurrency/preemption.c
r0182bfa r28f3a19 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 9 13:52:39 201813 // Update Count : 3 612 // Last Modified On : Tue Jun 5 17:35:49 2018 13 // Update Count : 37 14 14 // 15 15 … … 116 116 // If there are still alarms pending, reset the timer 117 117 if( alarms->head ) { 118 __cfaabi_dbg_print_buffer_decl( " KERNEL: @% lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);118 __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv); 119 119 Duration delta = alarms->head->alarm - currtime; 120 120 Duration caped = max(delta, 50`us); … … 161 161 void disable_interrupts() { 162 162 with( kernelTLS.preemption_state ) { 163 #if GCC_VERSION > 50000 163 164 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); 165 #endif 164 166 165 167 // Set enabled flag to false … … 190 192 // Check if we need to prempt the thread because an interrupt was missed 191 193 if( prev == 1 ) { 194 #if GCC_VERSION > 50000 192 195 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); 196 #endif 193 197 194 198 // Set enabled flag to true … … 217 221 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 218 222 if( prev == 1 ) { 223 #if GCC_VERSION > 50000 219 224 static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free"); 225 #endif 220 226 // Set enabled flag to true 221 227 // should be atomic to avoid preemption in the middle of the operation. 
… … 254 260 static void preempt( processor * this ) { 255 261 sigval_t value = { PREEMPT_NORMAL }; 256 pthread_sigqueue( this->kernel_thread, SIGUSR1, value );257 }258 259 // kill wrapper : signal a processor260 void terminate(processor * this) {261 this->do_terminate = true;262 sigval_t value = { PREEMPT_TERMINATE };263 262 pthread_sigqueue( this->kernel_thread, SIGUSR1, value ); 264 263 } … … 362 361 choose(sfp->si_value.sival_int) { 363 362 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 364 case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);363 case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) ); 365 364 default: 366 365 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 376 375 377 376 // Clear sighandler mask before context switching. 377 #if GCC_VERSION > 50000 378 378 static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" ); 379 #endif 379 380 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) { 380 381 abort( "internal error, sigprocmask" ); … … 479 480 } 480 481 482 #ifdef __CFA_WITH_VERIFY__ 483 bool __cfaabi_dbg_in_kernel() { 484 return !kernelTLS.preemption_state.enabled; 485 } 486 #endif 487 481 488 // Local Variables: // 482 489 // mode: c //
Note:
See TracChangeset
for help on using the changeset viewer.