Changeset 054514d for src/libcfa/concurrency
- Timestamp:
- May 29, 2018, 3:26:31 PM
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children:
- 3530f39a
- Parents:
- 96812c0 (diff), da60c631 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- src/libcfa/concurrency
- Files:
- 2 edited
Legend:
- Unmodified (no prefix)
- Added (+)
- Removed (-)
src/libcfa/concurrency/alarm.c
r96812c0 → r054514d

      // Created On  : Fri Jun 2 11:31:25 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Mon Apr 9 13:36:18 2018
    - // Update Count : 61
    + // Last Modified On : Fri May 25 06:25:47 2018
    + // Update Count : 67
      //
    …
      void __kernel_set_timer( Duration alarm ) {
    -     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%luns)", alarm.tv);
    +     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
          setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
      }
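The only functional change in this file is the conversion specifier in the verifyf message: %lu expects unsigned long, which can be only 32 bits wide, while the j length modifier selects intmax_t, which is at least 64 bits everywhere. A minimal standalone sketch of the same fix, assuming the printed value is a signed 64-bit nanosecond count standing in for Duration.tv (this is illustrative code, not part of the library):

    #include <stdint.h>
    #include <stdio.h>

    int main( void ) {
        int64_t tv = 1234567890123;   // nanosecond count, stand-in for Duration.tv
        // printf( "%luns", tv ) has undefined behaviour on targets where long is 32 bits:
        // the argument width and the conversion specifier disagree.
        // Casting to intmax_t and printing with %ji is portable.
        printf( "Setting timer to < 1us (%jins)\n", (intmax_t)tv );
        return 0;
    }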
src/libcfa/concurrency/preemption.c
r96812c0 → r054514d

      void disable_interrupts() {
          with( kernelTLS.preemption_state ) {
    -         enabled = false;
    +         #if GCC_VERSION > 50000
    +         static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
    +         #endif
    +
    +         // Set enabled flag to false
    +         // should be atomic to avoid preemption in the middle of the operation.
    +         // use memory order RELAXED since there is no inter-thread on this variable requirements
    +         __atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
    +
    +         // Signal the compiler that a fence is needed but only for signal handlers
    +         __atomic_signal_fence(__ATOMIC_ACQUIRE);
    +
              __attribute__((unused)) unsigned short new_val = disable_count + 1;
              disable_count = new_val;
    …
      // If counter reaches 0, execute any pending CtxSwitch
      void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -     processor * proc = kernelTLS.this_processor;   // Cache the processor now since interrupts can start happening after the atomic add
    -     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic add
    +     processor * proc = kernelTLS.this_processor;   // Cache the processor now since interrupts can start happening after the atomic store
    +     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

          with( kernelTLS.preemption_state ){
    …
              // Check if we need to prempt the thread because an interrupt was missed
              if( prev == 1 ) {
    -             enabled = true;
    +             #if GCC_VERSION > 50000
    +             static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
    +             #endif
    +
    +             // Set enabled flag to true
    +             // should be atomic to avoid preemption in the middle of the operation.
    +             // use memory order RELAXED since there is no inter-thread on this variable requirements
    +             __atomic_store_n(&enabled, true, __ATOMIC_RELAXED);
    +
    +             // Signal the compiler that a fence is needed but only for signal handlers
    +             __atomic_signal_fence(__ATOMIC_RELEASE);
                  if( proc->pending_preemption ) {
                      proc->pending_preemption = false;
    …
          verifyf( prev != 0u, "Incremented from %u\n", prev );   // If this triggers someone is enabled already enabled interrupts
          if( prev == 1 ) {
    -         kernelTLS.preemption_state.enabled = true;
    +         #if GCC_VERSION > 50000
    +         static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
    +         #endif
    +         // Set enabled flag to true
    +         // should be atomic to avoid preemption in the middle of the operation.
    +         // use memory order RELAXED since there is no inter-thread on this variable requirements
    +         __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
    +
    +         // Signal the compiler that a fence is needed but only for signal handlers
    +         __atomic_signal_fence(__ATOMIC_RELEASE);
          }
      }
    …
          // Clear sighandler mask before context switching.
    +     #if GCC_VERSION > 50000
          static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
    +     #endif
          if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
              abort( "internal error, sigprocmask" );
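The recurring pattern in this file is: assert that the flag is lock-free (guarded to newer GCC versions in the commit), write it with a relaxed atomic store so the write cannot be torn if a preemption signal arrives mid-operation, and follow the store with a compiler-only signal fence so a handler running on the same thread observes it in order. Below is a minimal standalone sketch of that pattern, assuming an ordinary static flag in place of the real thread-local kernelTLS.preemption_state.enabled; the names preemption_enabled, disable_preemption, enable_preemption and alarm_handler are illustrative only, not library code:

    #include <signal.h>
    #include <unistd.h>

    // Stand-in for the per-thread kernelTLS.preemption_state.enabled flag.
    static _Bool preemption_enabled = 0;

    // The store must be lock-free, otherwise a signal could interrupt it half-way.
    _Static_assert( __atomic_always_lock_free( sizeof(_Bool), 0 ), "flag must be lock-free" );

    static void alarm_handler( int sig ) {
        (void)sig;
        // The handler runs on the same thread; a relaxed load is enough to observe the flag.
        if ( ! __atomic_load_n( &preemption_enabled, __ATOMIC_RELAXED ) ) return;
        const char msg[] = "preemption signal accepted\n";
        write( STDOUT_FILENO, msg, sizeof(msg) - 1 );   // async-signal-safe reporting
    }

    static void disable_preemption( void ) {
        // Relaxed atomic store: no inter-thread ordering is needed, only atomicity.
        __atomic_store_n( &preemption_enabled, 0, __ATOMIC_RELAXED );
        // Compiler-only fence: keeps the store ordered with respect to the signal
        // handler without emitting a hardware fence.
        __atomic_signal_fence( __ATOMIC_ACQUIRE );
    }

    static void enable_preemption( void ) {
        __atomic_store_n( &preemption_enabled, 1, __ATOMIC_RELAXED );
        __atomic_signal_fence( __ATOMIC_RELEASE );
    }

    int main( void ) {
        signal( SIGALRM, alarm_handler );
        disable_preemption();
        raise( SIGALRM );   // ignored: the handler reads the flag as false
        enable_preemption();
        raise( SIGALRM );   // accepted: the handler reads the flag as true
        return 0;
    }

Compiled with gcc -std=gnu11 and run, this sketch should print the message once, after the second raise, mirroring how the library ignores preemption signals while interrupts are disabled.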