Changeset 58e822a for src/libcfa/concurrency/preemption.c
- Timestamp:
- May 25, 2018, 2:10:09 PM (6 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children:
- 91cfa34, eba74ba
- Parents:
- 45040f06 (diff), 8dbedfc (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/preemption.c
r45040f06 r58e822a 161 161 void disable_interrupts() { 162 162 with( kernelTLS.preemption_state ) { 163 enabled = false; 163 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); 164 165 // Set enabled flag to false 166 // should be atomic to avoid preemption in the middle of the operation. 167 // use memory order RELAXED since there is no inter-thread on this variable requirements 168 __atomic_store_n(&enabled, false, __ATOMIC_RELAXED); 169 170 // Signal the compiler that a fence is needed but only for signal handlers 171 __atomic_signal_fence(__ATOMIC_ACQUIRE); 172 164 173 __attribute__((unused)) unsigned short new_val = disable_count + 1; 165 174 disable_count = new_val; … … 171 180 // If counter reaches 0, execute any pending CtxSwitch 172 181 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 173 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add174 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic add182 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 183 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic store 175 184 176 185 with( kernelTLS.preemption_state ){ … … 181 190 // Check if we need to prempt the thread because an interrupt was missed 182 191 if( prev == 1 ) { 183 enabled = true; 192 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free"); 193 194 // Set enabled flag to true 195 // should be atomic to avoid preemption in the middle of the operation. 
196 // use memory order RELAXED since there is no inter-thread on this variable requirements 197 __atomic_store_n(&enabled, true, __ATOMIC_RELAXED); 198 199 // Signal the compiler that a fence is needed but only for signal handlers 200 __atomic_signal_fence(__ATOMIC_RELEASE); 184 201 if( proc->pending_preemption ) { 185 202 proc->pending_preemption = false; … … 200 217 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 201 218 if( prev == 1 ) { 202 kernelTLS.preemption_state.enabled = true; 219 static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free"); 220 // Set enabled flag to true 221 // should be atomic to avoid preemption in the middle of the operation. 222 // use memory order RELAXED since there is no inter-thread on this variable requirements 223 __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED); 224 225 // Signal the compiler that a fence is needed but only for signal handlers 226 __atomic_signal_fence(__ATOMIC_RELEASE); 203 227 } 204 228 }
Note: See TracChangeset
for help on using the changeset viewer.