Changeset 969b3fe
- Timestamp:
- Jul 18, 2017, 1:05:13 PM
- Branches:
- ADT, aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- a57cb58
- Parents:
- 5bd0aad
- Location:
- src/libcfa/concurrency
- Files:
- 5 edited
Legend:
- Unchanged lines are prefixed with a space, added lines with `+`, removed lines with `-`.
src/libcfa/concurrency/alarm.c
 //=============================================================================================
+// time type
+//=============================================================================================
+
+#define one_second      1_000_000_000ul
+#define one_milisecond  1_000_000ul
+#define one_microsecond 1_000ul
+#define one_nanosecond  1ul
+
+__cfa_time_t zero_time = { 0 };
+
+void ?{}( __cfa_time_t * this ) { this->val = 0; }
+void ?{}( __cfa_time_t * this, zero_t zero ) { this->val = 0; }
+
+void ?{}( itimerval * this, __cfa_time_t * alarm ) {
+    this->it_value.tv_sec  = alarm->val / one_second;                                  // seconds
+    this->it_value.tv_usec = max( (alarm->val % one_second) / one_microsecond, 1000 ); // microseconds
+    this->it_interval.tv_sec  = 0;
+    this->it_interval.tv_usec = 0;
+}
+
+void ?{}( __cfa_time_t * this, timespec * curr ) {
+    uint64_t secs  = curr->tv_sec;
+    uint64_t nsecs = curr->tv_nsec;
+    this->val = (secs * one_second) + nsecs;
+}
+
+__cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs ) {
+    this->val = 0;
+    return *this;
+}
+
+__cfa_time_t from_s ( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000_000ul; return ret; }
+__cfa_time_t from_ms( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000ul;     return ret; }
+__cfa_time_t from_us( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000ul;         return ret; }
+__cfa_time_t from_ns( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1ul;             return ret; }
+
+//=============================================================================================
 // Clock logic
 //=============================================================================================
@@ ... @@
     timespec curr;
     clock_gettime( CLOCK_REALTIME, &curr );
-    __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
-    // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
-    return curr_time;
+    return (__cfa_time_t){ &curr };
 }

 void __kernel_set_timer( __cfa_time_t alarm ) {
-    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %llu\n", (__cfa_time_t)alarm );
-    itimerval val;
-    val.it_value.tv_sec = alarm / TIMEGRAN;                                             // seconds
-    val.it_value.tv_usec = max( (alarm % TIMEGRAN) / ( TIMEGRAN / 1_000_000L ), 1000 ); // microseconds
-    val.it_interval.tv_sec = 0;
-    val.it_interval.tv_usec = 0;
+    itimerval val = { &alarm };
     setitimer( ITIMER_REAL, &val, NULL );
 }
@@ ... @@
 //=============================================================================================

-void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
+void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
     this->thrd = thrd;
     this->alarm = alarm;
@@ ... @@
 }

-void ?{}( alarm_node_t * this, processor * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
+void ?{}( alarm_node_t * this, processor * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
     this->proc = proc;
     this->alarm = alarm;
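Editorial note: the core of this file is moving the nanosecond fixed-point arithmetic out of __kernel_set_timer and into constructors on __cfa_time_t. A minimal C sketch of what the new itimerval constructor computes, assuming a nanosecond fixed-point representation; the names cfa_time, max_u64, and to_itimerval are illustrative, not part of the changeset:

    #include <stdint.h>
    #include <sys/time.h>

    typedef struct { uint64_t val; } cfa_time;   // nanoseconds since some epoch

    static uint64_t max_u64( uint64_t a, uint64_t b ) { return a > b ? a : b; }

    static struct itimerval to_itimerval( cfa_time alarm ) {
        const uint64_t one_second      = 1000000000ull;
        const uint64_t one_microsecond = 1000ull;
        struct itimerval val;
        val.it_value.tv_sec  = alarm.val / one_second;       // whole seconds
        // sub-second remainder in microseconds, clamped up to 1000us so a
        // deadline that is already (nearly) expired still arms the timer
        val.it_value.tv_usec = max_u64( (alarm.val % one_second) / one_microsecond, 1000 );
        val.it_interval.tv_sec  = 0;                         // one-shot timer,
        val.it_interval.tv_usec = 0;                         // no automatic reload
        return val;
    }

The clamp mirrors the max(..., 1000) in the constructor: a zero it_value would disable the timer rather than fire it immediately.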
src/libcfa/concurrency/alarm.h
 #include "assert"

-typedef uint64_t __cfa_time_t;
-
 struct thread_desc;
 struct processor;
+
+struct timespec;
+struct itimerval;
+
+//=============================================================================================
+// time type
+//=============================================================================================
+
+struct __cfa_time_t {
+    uint64_t val;
+};
+
+// ctors
+void ?{}( __cfa_time_t * this );
+void ?{}( __cfa_time_t * this, zero_t zero );
+void ?{}( __cfa_time_t * this, timespec * curr );
+void ?{}( itimerval * this, __cfa_time_t * alarm );
+
+__cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs );
+
+// logical ops
+static inline bool ?==?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val == rhs.val; }
+static inline bool ?!=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val != rhs.val; }
+static inline bool ?>? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >  rhs.val; }
+static inline bool ?<? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <  rhs.val; }
+static inline bool ?>=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >= rhs.val; }
+static inline bool ?<=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <= rhs.val; }
+
+static inline bool ?==?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val == rhs; }
+static inline bool ?!=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val != rhs; }
+static inline bool ?>? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >  rhs; }
+static inline bool ?<? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <  rhs; }
+static inline bool ?>=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >= rhs; }
+static inline bool ?<=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <= rhs; }
+
+// addition/substract
+static inline __cfa_time_t ?+?( __cfa_time_t lhs, __cfa_time_t rhs ) {
+    __cfa_time_t ret;
+    ret.val = lhs.val + rhs.val;
+    return ret;
+}
+
+static inline __cfa_time_t ?-?( __cfa_time_t lhs, __cfa_time_t rhs ) {
+    __cfa_time_t ret;
+    ret.val = lhs.val - rhs.val;
+    return ret;
+}
+
+__cfa_time_t from_s ( uint64_t );
+__cfa_time_t from_ms( uint64_t );
+__cfa_time_t from_us( uint64_t );
+__cfa_time_t from_ns( uint64_t );
+
+extern __cfa_time_t zero_time;

 //=============================================================================================
 // Clock logic
 //=============================================================================================
-
-#define TIMEGRAN 1_000_000_000L  // nanosecond granularity, except for timeval

 __cfa_time_t __kernel_get_time();
@@ ... @@
 typedef alarm_node_t ** __alarm_it_t;

-void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
-void ?{}( alarm_node_t * this, processor * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
+void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
+void ?{}( alarm_node_t * this, processor * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
 void ^?{}( alarm_node_t * this );
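Editorial note: replacing the bare typedef with a one-field struct means time values no longer convert silently to and from raw integers; all mixing must go through the declared operators. A rough plain-C analogue of the same wrapper idea (cfa_time and the helper names are illustrative only):

    #include <stdint.h>
    #include <stdbool.h>

    typedef struct { uint64_t val; } cfa_time;   // wrapper: no implicit int conversions

    static inline cfa_time from_ms_sketch( uint64_t ms ) { return (cfa_time){ ms * 1000000ull }; }
    static inline cfa_time cfa_time_add( cfa_time l, cfa_time r ) { return (cfa_time){ l.val + r.val }; }
    static inline bool     cfa_time_lt ( cfa_time l, cfa_time r ) { return l.val < r.val; }

    int main(void) {
        cfa_time period = from_ms_sketch( 10 );            // a 10ms preemption slice
        cfa_time next   = cfa_time_add( period, period );  // arithmetic stays explicit
        return cfa_time_lt( period, next ) ? 0 : 1;        // comparisons go by value
    }

In CFA the operator names ?+?, ?<?, and so on let the wrapper keep ordinary expression syntax; the C sketch has to spell the helpers out.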
src/libcfa/concurrency/kernel.c
 //-----------------------------------------------------------------------------
 // Kernel storage
-#define KERNEL_STORAGE(T,X) static char X##Storage[sizeof(T)]
-
-KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
-KERNEL_STORAGE(cluster, systemCluster);
-KERNEL_STORAGE(system_proc_t, systemProcessor);
-KERNEL_STORAGE(event_kernel_t, event_kernel);
-KERNEL_STORAGE(thread_desc, mainThread);
+KERNEL_STORAGE(cluster, mainCluster);
+KERNEL_STORAGE(processor, mainProcessor);
+KERNEL_STORAGE(processorCtx_t, mainProcessorCtx);
+KERNEL_STORAGE(thread_desc, mainThread);
 KERNEL_STORAGE(machine_context_t, mainThreadCtx);

-cluster * systemCluster;
-system_proc_t * systemProcessor;
-event_kernel_t * event_kernel;
+cluster * mainCluster;
+processor * mainProcessor;
 thread_desc * mainThread;
@@ ... @@
 // Global state

-volatile thread_local processor * this_processor;
 volatile thread_local coroutine_desc * this_coroutine;
 volatile thread_local thread_desc * this_thread;
+volatile thread_local processor * this_processor;
+
 volatile thread_local bool preemption_in_progress = 0;
 volatile thread_local unsigned short disable_preempt_count = 1;
@@ ... @@
     this->limit = (void *)(((intptr_t)this->base) - this->size);
-    this->context = &mainThreadCtxStorage;
+    this->context = &storage_mainThreadCtx;
     this->top = this->base;
 }
@@ ... @@
 void ?{}(processor * this) {
-    this{ systemCluster };
+    this{ mainCluster };
 }
@@ ... @@
     this->runner = runner;
-    LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner);
+    LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", runner);
     runner{ this };
 }
-
-LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
-
-void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
-    (&this->proc){ cltr, runner };
-}
-
-void ?{}(event_kernel_t * this) {
-    (&this->alarms){};
-    (&this->lock){};
-
-    verify( validate( &this->alarms ) );
-}
@@ ... @@
     verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

-    lock( &systemProcessor->proc.cltr->ready_queue_lock DEBUG_CTX2 );
-    append( &systemProcessor->proc.cltr->ready_queue, thrd );
-    unlock( &systemProcessor->proc.cltr->ready_queue_lock );
+    lock( &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
+    append( &this_processor->cltr->ready_queue, thrd );
+    unlock( &this_processor->cltr->ready_queue_lock );

     verify( disable_preempt_count > 0 );
@@ ... @@
     // Start by initializing the main thread
     // SKULLDUGGERY: the mainThread steals the process main thread
-    // which will then be scheduled by the systemProcessor normally
-    mainThread = (thread_desc *)&mainThreadStorage;
+    // which will then be scheduled by the mainProcessor normally
+    mainThread = (thread_desc *)&storage_mainThread;
     current_stack_info_t info;
     mainThread{ &info };
@@ ... @@
     LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");

-    // Initialize the system cluster
-    systemCluster = (cluster *)&systemClusterStorage;
-    systemCluster{};
-
-    LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");
-
-    // Initialize the system processor and the system processor ctx
+    // Initialize the main cluster
+    mainCluster = (cluster *)&storage_mainCluster;
+    mainCluster{};
+
+    LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n");
+
+    // Initialize the main processor and the main processor ctx
     // (the coroutine that contains the processing control flow)
-    systemProcessor = (system_proc_t *)&systemProcessorStorage;
-    systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtxStorage };
-
-    // Initialize the event kernel
-    event_kernel = (event_kernel_t *)&event_kernelStorage;
-    event_kernel{};
-
-    // Add the main thread to the ready queue
-    // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
-    ScheduleThread(mainThread);
+    mainProcessor = (processor *)&storage_mainProcessor;
+    mainProcessor{ mainCluster, (processorCtx_t *)&storage_mainProcessorCtx };

     //initialize the global state variables
-    this_processor = &systemProcessor->proc;
+    this_processor = mainProcessor;
     this_thread = mainThread;
     this_coroutine = &mainThread->cor;
-    disable_preempt_count = 1;

     // Enable preemption
     kernel_start_preemption();

-    // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
+    // Add the main thread to the ready queue
+    // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
+    ScheduleThread(mainThread);
+
+    // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
     // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
     // mainThread is on the ready queue when this call is made.
-    resume( systemProcessor->proc.runner );
+    resume( mainProcessor->runner );
@@ ... @@
     disable_interrupts();

-    // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
+    // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
     // When its coroutine terminates, it return control to the mainThread
     // which is currently here
-    systemProcessor->proc.do_terminate = true;
+    mainProcessor->do_terminate = true;
     suspend();
@@ ... @@
     kernel_stop_preemption();

-    // Destroy the system processor and its context in reverse order of construction
+    // Destroy the main processor and its context in reverse order of construction
     // These were manually constructed so we need manually destroy them
-    ^(systemProcessor->proc.runner){};
-    ^(systemProcessor){};
+    ^(mainProcessor->runner){};
+    ^(mainProcessor){};

     // Final step, destroy the main thread since it is no longer needed
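Editorial note: with system_proc_t gone, ScheduleThread reaches the ready queue through the current processor's cluster instead of the global systemProcessor. A self-contained C sketch of that lock/append/unlock pattern; the types and names here (node, cluster_s, spin_lock, and friends) are illustrative stand-ins for the CFA kernel's real types:

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct node { struct node * next; } node;

    typedef struct {
        atomic_flag ready_queue_lock;   // initialize with ATOMIC_FLAG_INIT
        node * head, * tail;            // intrusive singly linked ready queue
    } cluster_s;

    typedef struct { cluster_s * cltr; } processor_s;

    static _Thread_local processor_s * this_processor_sketch;

    static void spin_lock  ( atomic_flag * l ) { while( atomic_flag_test_and_set( l ) ); }
    static void spin_unlock( atomic_flag * l ) { atomic_flag_clear( l ); }

    static void schedule_thread_sketch( node * thrd ) {
        cluster_s * cltr = this_processor_sketch->cltr;  // queue found via current processor
        spin_lock( &cltr->ready_queue_lock );
        thrd->next = NULL;                               // append at tail
        if( cltr->tail ) cltr->tail->next = thrd; else cltr->head = thrd;
        cltr->tail = thrd;
        spin_unlock( &cltr->ready_queue_lock );
    }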
src/libcfa/concurrency/kernel_private.h
 extern "C" {
     void disable_interrupts();
-    void enable_interrupts_noRF();
+    void enable_interrupts_noPoll();
     void enable_interrupts( DEBUG_CTX_PARAM );
 }
@@ ... @@
 void spin(processor * this, unsigned int * spin_count);

-struct system_proc_t {
-    processor proc;
-};
-
 struct event_kernel_t {
     alarm_list_t alarms;
@@ ... @@
 };

-extern cluster * systemCluster;
-extern system_proc_t * systemProcessor;
 extern event_kernel_t * event_kernel;
@@ ... @@
 extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);

+//-----------------------------------------------------------------------------
+// Utils
+#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
+
 #endif //KERNEL_PRIVATE_H
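Editorial note: KERNEL_STORAGE, now shared via kernel_private.h, reserves static memory for kernel objects that must be constructed by hand during boot, before any allocator is usable. A hedged C sketch of the pattern; cluster_t, init_cluster, and kernel_boot_sketch are placeholders, and a production version may also want explicit alignment on the storage:

    typedef struct { int dummy; } cluster_t;   // placeholder type

    #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]

    KERNEL_STORAGE(cluster_t, mainCluster);    // expands to: static char storage_mainCluster[sizeof(cluster_t)];

    cluster_t * mainCluster;                   // public handle, set at boot

    static void init_cluster( cluster_t * this ) { this->dummy = 0; }

    void kernel_boot_sketch( void ) {
        mainCluster = (cluster_t *)&storage_mainCluster;  // point handle at reserved storage
        init_cluster( mainCluster );                      // construct in place, no malloc
    }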
src/libcfa/concurrency/preemption.c
 #endif

+//TODO move to defaults
 #define __CFA_DEFAULT_PREEMPTION__ 10000

+//TODO move to defaults
 __attribute__((weak)) unsigned int default_preemption() {
     return __CFA_DEFAULT_PREEMPTION__;
 }

+// Short hands for signal context information
 #define __CFA_SIGCXT__ ucontext_t *
 #define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt

+// FwdDeclarations : timeout handlers
 static void preempt( processor * this );
 static void timeout( thread_desc * this );

+// FwdDeclarations : Signal handlers
 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
-void sigHandler_alarm    ( __CFA_SIGPARMS__ );
 void sigHandler_segv     ( __CFA_SIGPARMS__ );
 void sigHandler_abort    ( __CFA_SIGPARMS__ );

+// FwdDeclarations : sigaction wrapper
 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
-LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )

+// FwdDeclarations : alarm thread main
+void * alarm_loop( __attribute__((unused)) void * args );
+
+// Machine specific register name
 #ifdef __x86_64__
 #define CFA_REG_IP REG_RIP
@@ ... @@
 #endif

+KERNEL_STORAGE(event_kernel_t, event_kernel); // private storage for event kernel
+event_kernel_t * event_kernel;                // kernel public handle to even kernel
+static pthread_t alarm_thread;                // pthread handle to alarm thread
+
+void ?{}(event_kernel_t * this) {
+    (&this->alarms){};
+    (&this->lock){};
+}

 //=============================================================================================
@@ ... @@
 //=============================================================================================

+// Get next expired node
+static inline alarm_node_t * get_expired( alarm_list_t * alarms, __cfa_time_t currtime ) {
+    if( !alarms->head ) return NULL;                   // If no alarms return null
+    if( alarms->head->alarm >= currtime ) return NULL; // If alarms head not expired return null
+    return pop(alarms);                                // Otherwise just pop head
+}
+
+// Tick one frame of the Discrete Event Simulation for alarms
 void tick_preemption() {
-    alarm_list_t * alarms = &event_kernel->alarms;
-    __cfa_time_t currtime = __kernel_get_time();
-
-    // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption @ %llu\n", currtime );
-    while( alarms->head && alarms->head->alarm < currtime ) {
-        alarm_node_t * node = pop(alarms);
-        // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
-
+    alarm_node_t * node = NULL;                    // Used in the while loop but cannot be declared in the while condition
+    alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading
+    __cfa_time_t currtime = __kernel_get_time();   // Check current time once so we everything "happens at once"
+
+    //Loop throught every thing expired
+    while( node = get_expired( alarms, currtime ) ) {
+
+        // Check if this is a kernel alarm
         if( node->kernel_alarm ) {
             preempt( node->proc );
@@ ... @@
         }

-        verify( validate( alarms ) );
-
+        // Check if this is a periodic alarm
         __cfa_time_t period = node->period;
         if( period > 0 ) {
-            node->alarm = currtime + period;
-            // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Reinsert %p @ %llu (%llu + %llu)\n", node, node->alarm, currtime, period );
-            insert( alarms, node );
+            node->alarm = currtime + period; // Alarm is periodic, add currtime to it (used cached current time)
+            insert( alarms, node );          // Reinsert the node for the next time it triggers
         }
         else {
-            node->set = false;
-        }
-    }
-
-    if( alarms->head ) {
-        __kernel_set_timer( alarms->head->alarm - currtime );
-    }
-
-    verify( validate( alarms ) );
-    // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
-}
-
+            node->set = false;               // Node is one-shot, just mark it as not pending
+        }
+    }
+
+    // If there are still alarms pending, reset the timer
+    if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); }
+}
+
+// Update the preemption of a processor and notify interested parties
 void update_preemption( processor * this, __cfa_time_t duration ) {
-    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %llu\n", this, duration );
-
     alarm_node_t * alarm = this->preemption_alarm;
-    duration *= 1000;

     // Alarms need to be enabled
@@ ... @@

 extern "C" {
+    // Disable interrupts by incrementing the counter
     void disable_interrupts() {
         __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
-        verify( new_val < (unsigned short)65_000 );
-        verify( new_val != (unsigned short) 0 );
-    }
-
-    void enable_interrupts_noRF() {
-        __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
-        verify( prev != (unsigned short) 0 );
-    }
-
+        verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them
+    }
+
+    // Enable interrupts by decrementing the counter
+    // If counter reaches 0, execute any pending CtxSwitch
     void enable_interrupts( DEBUG_CTX_PARAM ) {
-        processor * proc = this_processor;
-        thread_desc * thrd = this_thread;
+        processor * proc = this_processor; // Cache the processor now since interrupts can start happening after the atomic add
+        thread_desc * thrd = this_thread;  // Cache the thread now since interrupts can start happening after the atomic add
+
         unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
-        verify( prev != (unsigned short) 0 );
+        verify( prev != 0u ); // If this triggers someone is enabled already enabled interrupts
+
+        // Check if we need to prempt the thread because an interrupt was missed
         if( prev == 1 && proc->pending_preemption ) {
             proc->pending_preemption = false;
@@ ... @@
         }

+        // For debugging purposes : keep track of the last person to enable the interrupts
         LIB_DEBUG_DO( proc->last_enable = caller; )
     }
-}

+    // Disable interrupts by incrementint the counter
+    // Don't execute any pending CtxSwitch even if counter reaches 0
+    void enable_interrupts_noPoll() {
+        __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
+        verify( prev != 0u ); // If this triggers someone is enabled already enabled interrupts
+    }
+}
+
+// sigprocmask wrapper : unblock a single signal
 static inline void signal_unblock( int sig ) {
     sigset_t mask;
@@ ... @@
 }

+// sigprocmask wrapper : block a single signal
 static inline void signal_block( int sig ) {
     sigset_t mask;
@@ ... @@
 }

-static inline bool preemption_ready() {
-    return disable_preempt_count == 0 && !preemption_in_progress;
-}
-
-static inline void defer_ctxSwitch() {
-    this_processor->pending_preemption = true;
-}
-
+// kill wrapper : signal a processor
 static void preempt( processor * this ) {
     pthread_kill( this->kernel_thread, SIGUSR1 );
 }

+// reserved for future use
 static void timeout( thread_desc * this ) {
     //TODO : implement waking threads
 }

+// Check if a CtxSwitch signal handler shoud defer
+// If true : preemption is safe
+// If false : preemption is unsafe and marked as pending
+static inline bool preemption_ready() {
+    bool ready = disable_preempt_count == 0 && !preemption_in_progress; // Check if preemption is safe
+    this_processor->pending_preemption = !ready;                        // Adjust the pending flag accordingly
+    return ready;
+}
+
 //=============================================================================================
 // Kernel Signal Startup/Shutdown logic
 //=============================================================================================

-static pthread_t alarm_thread;
-void * alarm_loop( __attribute__((unused)) void * args );
-
+// Startup routine to activate preemption
+// Called from kernel_startup
 void kernel_start_preemption() {
     LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
-    __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
-    // __kernel_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO );
-    // __kernel_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO );
+
+    // Start with preemption disabled until ready
+    disable_preempt_count = 1;
+
+    // Initialize the event kernel
+    event_kernel = (event_kernel_t *)&storage_event_kernel;
+    event_kernel{};
+
+    // Setup proper signal handlers
+    __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); // CtxSwitch handler
+    // __kernel_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); // Failure handler
+    // __kernel_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); // Failure handler

     signal_block( SIGALRM );
@@ ... @@
 }

+// Shutdown routine to deactivate preemption
+// Called from kernel_shutdown
 void kernel_stop_preemption() {
     LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n");

+    // Block all signals since we are already shutting down
     sigset_t mask;
     sigfillset( &mask );
     sigprocmask( SIG_BLOCK, &mask, NULL );

+    // Notify the alarm thread of the shutdown
     sigval val = { 1 };
     pthread_sigqueue( alarm_thread, SIGALRM, val );
+
+    // Wait for the preemption thread to finish
     pthread_join( alarm_thread, NULL );
+
+    // Preemption is now fully stopped
+
     LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
 }

+// Raii ctor/dtor for the preemption_scope
+// Used by thread to control when they want to receive preemption signals
 void ?{}( preemption_scope * this, processor * proc ) {
-    (&this->alarm){ proc };
+    (&this->alarm){ proc, zero_time, zero_time };
     this->proc = proc;
     this->proc->preemption_alarm = &this->alarm;
-    update_preemption( this->proc, this->proc->cltr->preemption );
+
+    update_preemption( this->proc, from_us(this->proc->cltr->preemption) );
 }
@@ ... @@
     disable_interrupts();

-    update_preemption( this->proc, 0 );
+    update_preemption( this->proc, zero_time );
 }
@@ ... @@
 //=============================================================================================

+// Context switch signal handler
+// Receives SIGUSR1 signal and causes the current thread to yield
 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
     LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )
-    if( preemption_ready() ) {
-        preemption_in_progress = true;
-        signal_unblock( SIGUSR1 );
-        this_processor->pending_preemption = false;
-        preemption_in_progress = false;
-        BlockInternal( (thread_desc*)this_thread );
-    }
-    else {
-        defer_ctxSwitch();
-    }
-}
-
+
+    // Check if it is safe to preempt here
+    if( !preemption_ready() ) { return; }
+
+    preemption_in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
+    signal_unblock( SIGUSR1 );      // We are about to CtxSwitch out of the signal handler, let other handlers in
+    preemption_in_progress = false; // Clear the in progress flag
+
+    // Preemption can occur here
+
+    BlockInternal( (thread_desc*)this_thread ); // Do the actual CtxSwitch
+}
+
+// Main of the alarm thread
+// Waits on SIGALRM and send SIGUSR1 to whom ever needs it
 void * alarm_loop( __attribute__((unused)) void * args ) {
+    // Block sigalrms to control when they arrive
     sigset_t mask;
     sigemptyset( &mask );
@@ ... @@
     }

+    // Main loop
     while( true ) {
+        // Wait for a sigalrm
         siginfo_t info;
         int sig = sigwaitinfo( &mask, &info );
+
+        // If another signal arrived something went wrong
         assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

         LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
+        // Switch on the code (a.k.a. the sender)
         switch( info.si_code )
         {
+        // Timers can apparently be marked as sent for the kernel
+        // In either case, tick preemption
         case SI_TIMER:
         case SI_KERNEL:
@@ ... @@
             unlock( &event_kernel->lock );
             break;
+        // Signal was not sent by the kernel but by an other thread
         case SI_QUEUE:
+            // For now, other thread only signal the alarm thread to shut it down
+            // If this needs to change use info.si_value and handle the case here
             goto EXIT;
         }
@@ ... @@
 }

+// Sigaction wrapper : register an signal handler
 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
     struct sigaction act;
@@ ... @@
 }

-typedef void (*sa_handler_t)(int);
-
+// Sigaction wrapper : restore default handler
 static void __kernel_sigdefault( int sig ) {
     struct sigaction act;

-    //act.sa_handler = SIG_DFL;
+    act.sa_handler = SIG_DFL;
     act.sa_flags = 0;
     sigemptyset( &act.sa_mask );
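Editorial note: the preemption redesign keeps all SIGALRM handling on a dedicated thread that receives the signal synchronously, so alarm processing never runs in async-signal context. A compilable C sketch of that loop under Linux/GNU assumptions; SI_KERNEL and pthread_sigqueue are GNU extensions, error handling is elided, and tick_preemption is left as a comment:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <signal.h>

    static void * alarm_loop_sketch( void * args ) {
        (void)args;
        sigset_t mask;
        sigemptyset( &mask );
        sigaddset( &mask, SIGALRM );
        pthread_sigmask( SIG_BLOCK, &mask, NULL ); // blocked => delivered only via sigwaitinfo

        for( ;; ) {
            siginfo_t info;
            int sig = sigwaitinfo( &mask, &info ); // wait synchronously for SIGALRM
            if( sig != SIGALRM ) continue;

            switch( info.si_code ) {
            case SI_TIMER:                 // sent by an expired interval timer
            case SI_KERNEL:                // or marked as kernel-sent
                /* tick_preemption(); */   // handle expired alarms here
                break;
            case SI_QUEUE:                 // sent by pthread_sigqueue: shutdown request
                return NULL;
            }
        }
    }

Because the signal is consumed with sigwaitinfo rather than a handler, the loop is free to take locks and walk the alarm list, which an asynchronous handler could not safely do.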