Changeset b69ea6b for src/libcfa/concurrency/preemption.c
- Timestamp:
- Feb 15, 2018, 10:52:35 AM (6 years ago)
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- d27e340
- Parents:
- ff2d1139
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
src/libcfa/concurrency/preemption.c
rff2d1139 rb69ea6b 149 149 // Disable interrupts by incrementing the counter 150 150 void disable_interrupts() { 151 preemption _enabled = false;152 __attribute__((unused)) unsigned short new_val = disable_preempt_count + 1;153 disable_preempt_count = new_val;151 preemption.enabled = false; 152 __attribute__((unused)) unsigned short new_val = preemption.disable_count + 1; 153 preemption.disable_count = new_val; 154 154 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 155 155 } … … 161 161 thread_desc * thrd = this_thread; // Cache the thread now since interrupts can start happening after the atomic add 162 162 163 unsigned short prev = disable_preempt_count;164 disable_preempt_count -= 1;163 unsigned short prev = preemption.disable_count; 164 preemption.disable_count -= 1; 165 165 verify( prev != 0u ); // If this triggers someone is enabled already enabled interrupts 166 166 167 167 // Check if we need to prempt the thread because an interrupt was missed 168 168 if( prev == 1 ) { 169 preemption _enabled = true;169 preemption.enabled = true; 170 170 if( proc->pending_preemption ) { 171 171 proc->pending_preemption = false; … … 181 181 // Don't execute any pending CtxSwitch even if counter reaches 0 182 182 void enable_interrupts_noPoll() { 183 unsigned short prev = disable_preempt_count;184 disable_preempt_count -= 1;183 unsigned short prev = preemption.disable_count; 184 preemption.disable_count -= 1; 185 185 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 186 186 if( prev == 1 ) { 187 preemption _enabled = true;187 preemption.enabled = true; 188 188 } 189 189 } … … 235 235 // If false : preemption is unsafe and marked as pending 236 236 static inline bool preemption_ready() { 237 bool ready = preemption _enabled && !preemption_in_progress; // Check if preemption is safe237 bool ready = preemption.enabled && !preemption.in_progress; // Check if preemption is safe 238 238 this_processor->pending_preemption = !ready; // Adjust the pending flag accordingly 239 239 return ready; … … 250 250 251 251 // Start with preemption disabled until ready 252 preemption _enabled = false;253 disable_preempt_count = 1;252 preemption.enabled = false; 253 preemption.disable_count = 1; 254 254 255 255 // Initialize the event kernel … … 290 290 // Used by thread to control when they want to receive preemption signals 291 291 void ?{}( preemption_scope & this, processor * proc ) { 292 (this.alarm){ proc, zero_time, zero_time};292 (this.alarm){ proc, 0`cfa_s, 0`cfa_s }; 293 293 this.proc = proc; 294 294 this.proc->preemption_alarm = &this.alarm; … … 300 300 disable_interrupts(); 301 301 302 update_preemption( this.proc, zero_time);302 update_preemption( this.proc, 0`cfa_s ); 303 303 } 304 304 … … 330 330 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread); 331 331 332 preemption _in_progress = true; // Sync flag : prevent recursive calls to the signal handler332 preemption.in_progress = true; // Sync flag : prevent recursive calls to the signal handler 333 333 signal_unblock( SIGUSR1 ); // We are about to CtxSwitch out of the signal handler, let other handlers in 334 preemption _in_progress = false; // Clear the in progress flag334 preemption.in_progress = false; // Clear the in progress flag 335 335 336 336 // Preemption can occur here
Note: See TracChangeset
for help on using the changeset viewer.