Files: 1 edited

src/libcfa/concurrency/preemption.c (modified) (9 diffs)
src/libcfa/concurrency/preemption.c
--- src/libcfa/concurrency/preemption.c (r85b1deb)
+++ src/libcfa/concurrency/preemption.c (r13073be)

@@ -10,6 +10,6 @@
 // Created On : Mon Jun 5 14:20:42 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Tue Jun 5 17:35:49 2018
-// Update Count : 37
+// Last Modified On : Mon Apr 9 13:52:39 2018
+// Update Count : 36
 //

@@ -116,5 +116,5 @@
 // If there are still alarms pending, reset the timer
 if( alarms->head ) {
-__cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
+__cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
 Duration delta = alarms->head->alarm - currtime;
 Duration caped = max(delta, 50`us);

@@ -161,7 +161,5 @@
 void disable_interrupts() {
 with( kernelTLS.preemption_state ) {
-#if GCC_VERSION > 50000
 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
-#endif

 // Set enabled flag to false

@@ -192,7 +190,5 @@
 // Check if we need to prempt the thread because an interrupt was missed
 if( prev == 1 ) {
-#if GCC_VERSION > 50000
 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
-#endif

 // Set enabled flag to true

@@ -221,7 +217,5 @@
 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts
 if( prev == 1 ) {
-#if GCC_VERSION > 50000
 static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
-#endif
 // Set enabled flag to true
 // should be atomic to avoid preemption in the middle of the operation.

@@ -260,4 +254,11 @@
 static void preempt( processor * this ) {
 sigval_t value = { PREEMPT_NORMAL };
+pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
+}
+
+// kill wrapper : signal a processor
+void terminate(processor * this) {
+this->do_terminate = true;
+sigval_t value = { PREEMPT_TERMINATE };
 pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
 }

@@ -361,5 +362,5 @@
 choose(sfp->si_value.sival_int) {
 case PREEMPT_NORMAL : ; // Normal case, nothing to do here
-case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ));
+case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate );
 default:
 abort( "internal error, signal value is %d", sfp->si_value.sival_int );

@@ -375,7 +376,5 @@

 // Clear sighandler mask before context switching.
-#if GCC_VERSION > 50000
 static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
-#endif
 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
 abort( "internal error, sigprocmask" );

@@ -480,10 +479,4 @@
 }

-#ifdef __CFA_WITH_VERIFY__
-bool __cfaabi_dbg_in_kernel() {
-return !kernelTLS.preemption_state.enabled;
-}
-#endif
-
 // Local Variables: //
 // mode: c //
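Several hunks above drop the #if GCC_VERSION > 50000 guards around static_assert(__atomic_always_lock_free(...)). A minimal sketch of that pattern in plain C with the GCC/Clang atomic builtins; the variable below is a hypothetical stand-in for kernelTLS.preemption_state.enabled, not the libcfa code itself:

/* Minimal sketch, assuming GCC or Clang; `enabled` is an illustrative stand-in. */
#include <assert.h>      /* static_assert */
#include <stdbool.h>

static bool enabled;     /* stand-in for kernelTLS.preemption_state.enabled */

void demo_disable_interrupts( void ) {
    /* Refuse to build if updating this flag would fall back to a lock. */
    static_assert( __atomic_always_lock_free( sizeof(enabled), &enabled ),
                   "Must be lock-free" );

    /* Clear the flag with an atomic store so no reader ever sees a torn write. */
    __atomic_store_n( &enabled, false, __ATOMIC_SEQ_CST );
}

Because __atomic_always_lock_free() folds to a compile-time constant, the assertion fails the build outright on any target where the flag could not be updated lock-free, which matters here since the flag is manipulated around preemption signals.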
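The added terminate() wrapper reuses the same delivery path as preempt(): a SIGUSR1 carrying a sigval is queued to one specific kernel thread with pthread_sigqueue(), and the signal handler dispatches on sfp->si_value.sival_int (PREEMPT_NORMAL vs. PREEMPT_TERMINATE). A self-contained sketch of that mechanism in plain C for GNU/Linux; the DEMO_* constants, handler, and worker thread are illustrative, not libcfa code:

#define _GNU_SOURCE           /* pthread_sigqueue is a GNU extension */
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

enum { DEMO_NORMAL = 0, DEMO_TERMINATE = 1 };   /* stand-ins for PREEMPT_NORMAL / PREEMPT_TERMINATE */

static volatile sig_atomic_t done = 0;

/* SA_SIGINFO handler: the queued value arrives in info->si_value. */
static void on_sigusr1( int sig, siginfo_t * info, void * ctx ) {
    (void)sig; (void)ctx;
    switch ( info->si_value.sival_int ) {
      case DEMO_NORMAL   : break;               /* normal case, nothing to do */
      case DEMO_TERMINATE: done = 1; break;
      default            : break;               /* unexpected value */
    }
}

static void * worker( void * arg ) {
    (void)arg;
    while ( ! done ) sleep( 1 );                /* signals interrupt the sleep */
    return NULL;
}

int main( void ) {
    struct sigaction sa;
    sigemptyset( &sa.sa_mask );
    sa.sa_sigaction = on_sigusr1;
    sa.sa_flags = SA_SIGINFO;
    sigaction( SIGUSR1, &sa, NULL );

    pthread_t tid;
    pthread_create( &tid, NULL, worker, NULL );

    sigval_t value = { .sival_int = DEMO_NORMAL };
    pthread_sigqueue( tid, SIGUSR1, value );    /* "preempt": value-carrying signal to one thread */

    value.sival_int = DEMO_TERMINATE;
    pthread_sigqueue( tid, SIGUSR1, value );    /* "terminate": ask the worker to stop */

    pthread_join( tid, NULL );
    return 0;
}

Unlike a plain pthread_kill, pthread_sigqueue attaches a payload to the signal, which is what lets one SIGUSR1 handler distinguish a normal preemption tick from a termination request.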