Changeset 534d84e for src/libcfa/concurrency/preemption.c
- Timestamp: Apr 28, 2018, 10:35:51 AM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: b6dc097
- Parents: 9997fee (diff), a0c7d5cc (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- File: 1 edited
Legend:
- Lines prefixed with "-" were removed, lines prefixed with "+" were added, and unprefixed lines are unmodified context.
src/libcfa/concurrency/preemption.c
r9997fee → r534d84e

@@ old 149-156, new 149-158 @@
  // Disable interrupts by incrementing the counter
  void disable_interrupts() {
- 	TL_GET( preemption_state ).enabled = false;
- 	__attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1;
- 	TL_GET( preemption_state ).disable_count = new_val;
- 	verify( new_val < 65_000u );                   // If this triggers someone is disabling interrupts without enabling them
+ 	with( TL_GET( preemption_state ) ) {
+ 		enabled = false;
+ 		__attribute__((unused)) unsigned short new_val = disable_count + 1;
+ 		disable_count = new_val;
+ 		verify( new_val < 65_000u );               // If this triggers someone is disabling interrupts without enabling them
+ 	}
  }

@@ old 161-174, new 163-178 @@
  	thread_desc * thrd = TL_GET( this_thread );   // Cache the thread now since interrupts can start happening after the atomic add

- 	unsigned short prev = TL_GET( preemption_state ).disable_count;
- 	TL_GET( preemption_state ).disable_count -= 1;
- 	verify( prev != 0u );                          // If this triggers someone is enabled already enabled interruptsverify( prev != 0u );
-
- 	// Check if we need to prempt the thread because an interrupt was missed
- 	if( prev == 1 ) {
- 		TL_GET( preemption_state ).enabled = true;
- 		if( proc->pending_preemption ) {
- 			proc->pending_preemption = false;
- 			BlockInternal( thrd );
+ 	with( TL_GET( preemption_state ) ){
+ 		unsigned short prev = disable_count;
+ 		disable_count -= 1;
+ 		verify( prev != 0u );                      // If this triggers someone is enabled already enabled interruptsverify( prev != 0u );
+
+ 		// Check if we need to prempt the thread because an interrupt was missed
+ 		if( prev == 1 ) {
+ 			enabled = true;
+ 			if( proc->pending_preemption ) {
+ 				proc->pending_preemption = false;
+ 				BlockInternal( thrd );
+ 			}
  		}
  	}

@@ old 328-332, new 332-336 @@
  	if( !preemption_ready() ) { return; }

- 	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
+ 	__cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", TL_GET( this_processor ), TL_GET( this_thread ) );

  	TL_GET( preemption_state ).in_progress = true; // Sync flag : prevent recursive calls to the signal handler
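For context, the substance of this merge is a refactoring of disable_interrupts() and enable_interrupts() to use Cforall's with statement, which opens an aggregate's fields into the surrounding scope so the repeated TL_GET( preemption_state ). qualifier is written once per block instead of on every access; the debug print in the third hunk is likewise changed to read this_processor and this_thread through TL_GET. Below is a minimal, hypothetical sketch of the same pattern in plain Cforall; the struct, variable, and function names are invented for illustration and are not the actual libcfa declarations.

// Hypothetical sketch of the with-statement refactoring in this changeset.
// The declarations below are illustrative only, not the real libcfa code.
#include <stdbool.h>

struct preemption_state_t {
	bool enabled;
	unsigned short disable_count;
};

static struct preemption_state_t state = { true, 0 };

// Before: every field access repeats the full qualifier.
static void disable_qualified() {
	state.enabled = false;
	unsigned short new_val = state.disable_count + 1;
	state.disable_count = new_val;
}

// After: with( state ) opens the fields of state into scope, so
// enabled and disable_count resolve to state.enabled and
// state.disable_count without repeating the qualifier.
static void disable_with() {
	with( state ) {
		enabled = false;
		unsigned short new_val = disable_count + 1;
		disable_count = new_val;
	}
}

The counting discipline itself is unchanged by the merge: disable_interrupts() increments disable_count, enable_interrupts() decrements it, and only when the count returns to zero (prev == 1) is enabled set back to true and any preemption deferred while interrupts were disabled delivered via BlockInternal().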