Changeset d0a045c7 for src/libcfa/concurrency/preemption.c
- Timestamp:
- Feb 1, 2018, 5:37:37 PM (6 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 85521c7
- Parents:
- e76bd39
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/preemption.c
re76bd39 rd0a045c7 142 142 // Disable interrupts by incrementing the counter 143 143 void disable_interrupts() { 144 __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST ); 144 preemption_enabled = false; 145 __attribute__((unused)) unsigned short new_val = disable_preempt_count + 1; 146 disable_preempt_count = new_val; 145 147 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 146 148 } … … 152 154 thread_desc * thrd = this_thread; // Cache the thread now since interrupts can start happening after the atomic add 153 155 154 unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 156 unsigned short prev = disable_preempt_count; 157 disable_preempt_count -= 1; 155 158 verify( prev != 0u ); // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 156 159 157 160 // Check if we need to prempt the thread because an interrupt was missed 158 if( prev == 1 && proc->pending_preemption ) { 159 proc->pending_preemption = false; 160 BlockInternal( thrd ); 161 if( prev == 1 ) { 162 preemption_enabled = true; 163 if( proc->pending_preemption ) { 164 proc->pending_preemption = false; 165 BlockInternal( thrd ); 166 } 161 167 } 162 168 … … 168 174 // Don't execute any pending CtxSwitch even if counter reaches 0 169 175 void enable_interrupts_noPoll() { 170 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 176 unsigned short prev = disable_preempt_count; 177 disable_preempt_count -= 1; 171 178 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 179 if( prev == 1 ) { 180 preemption_enabled = true; 181 } 172 182 } 173 183 } … … 210 220 // If false : preemption is unsafe and marked as pending 211 221 static inline bool preemption_ready() { 212 bool ready = 
disable_preempt_count == 0 && !preemption_in_progress; // Check if preemption is safe 222 bool ready = preemption_enabled && !preemption_in_progress; // Check if preemption is safe 213 223 this_processor->pending_preemption = !ready; // Adjust the pending flag accordingly 214 224 return ready; … … 225 235 226 236 // Start with preemption disabled until ready 237 preemption_enabled = false; 227 238 disable_preempt_count = 1; 228 239
Note: See TracChangeset
for help on using the changeset viewer.