Timestamp:
Mar 30, 2018, 7:21:28 PM (6 years ago)
Author:
Peter A. Buhr <pabuhr@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
Children:
273cde6
Parents:
fae90d5
Message:

thread-local storage converted to a structure, with thread-local macros for access
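
For context when reading the diff below: the change routes every access to the per-thread kernel variables (this_processor, this_thread, preemption_state) through a TL_GET macro. A minimal sketch of the pattern, assuming the variables are gathered into one thread-local structure reached through a simple member-access macro; the struct name KernelThreadLocals and the instance name kernel_tls are hypothetical, and only TL_GET and the member names actually appear in the diff:

    #include <stdbool.h>

    struct processor;                            // defined elsewhere in libcfa
    struct thread_desc;                          // defined elsewhere in libcfa

    struct KernelThreadLocals {                  // hypothetical name
            struct processor   * this_processor;
            struct thread_desc * this_thread;
            struct {
                    volatile bool           enabled;        // is preemption currently allowed?
                    volatile unsigned short disable_count;  // nesting depth of disable_interrupts()
                    volatile bool           in_progress;    // is the preemption handler mid-flight?
            } preemption_state;
    };

    extern __thread struct KernelThreadLocals kernel_tls;   // hypothetical instance

    #define TL_GET( member ) kernel_tls.member

Funnelling every access through one macro means the storage strategy (plain __thread, a register-held pointer, etc.) can change later without touching the use sites shown in the hunks below.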

File:
1 edited

  • src/libcfa/concurrency/preemption.c

    --- src/libcfa/concurrency/preemption.c  (rfae90d5)
    +++ src/libcfa/concurrency/preemption.c  (rb10affd)

    @@ -10,6 +10,6 @@
     // Created On       : Mon Jun 5 14:20:42 2017
     // Last Modified By : Peter A. Buhr
    -// Last Modified On : Tue Mar 27 11:28:51 2018
    -// Update Count     : 24
    +// Last Modified On : Fri Mar 30 17:27:43 2018
    +// Update Count     : 31
     //
     
    @@ -150,7 +150,7 @@
        // Disable interrupts by incrementing the counter
        void disable_interrupts() {
    -           preemption_state.enabled = false;
    -           __attribute__((unused)) unsigned short new_val = preemption_state.disable_count + 1;
    -           preemption_state.disable_count = new_val;
    +           TL_GET( preemption_state ).enabled = false;
    +           __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1;
    +           TL_GET( preemption_state ).disable_count = new_val;
                verify( new_val < 65_000u );              // If this triggers, someone is disabling interrupts without enabling them
        }
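
Since disable_interrupts() only increments a per-thread counter, disabling nests; only the matching outermost enable re-arms preemption. A usage sketch of the discipline (the debug-context parameter of enable_interrupts is elided here):

    disable_interrupts();           // depth 1: preemption off
    disable_interrupts();           // depth 2: still off, disable_count == 2
    // ... code that must not be preempted ...
    enable_interrupts();            // depth 1: prev == 2, still off
    enable_interrupts();            // depth 0: prev == 1, preemption back on;
                                    // any pending CtxSwitch is taken now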
     
    @@ -159,14 +159,14 @@
        // If counter reaches 0, execute any pending CtxSwitch
        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -           processor   * proc = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
    -           thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
    +           processor   * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add
    +           thread_desc * thrd = TL_GET( this_thread );    // Cache the thread now since interrupts can start happening after the atomic add
     
    -           unsigned short prev = preemption_state.disable_count;
    -           preemption_state.disable_count -= 1;
    +           unsigned short prev = TL_GET( preemption_state ).disable_count;
    +           TL_GET( preemption_state ).disable_count -= 1;
                verify( prev != 0u );                     // If this triggers, someone enabled interrupts that were already enabled
     
                // Check if we need to preempt the thread because an interrupt was missed
                if( prev == 1 ) {
    -                   preemption_state.enabled = true;
    +                   TL_GET( preemption_state ).enabled = true;
                        if( proc->pending_preemption ) {
                                proc->pending_preemption = false;
     
    @@ -182,9 +182,9 @@
        // Don't execute any pending CtxSwitch even if counter reaches 0
        void enable_interrupts_noPoll() {
    -           unsigned short prev = preemption_state.disable_count;
    -           preemption_state.disable_count -= 1;
    +           unsigned short prev = TL_GET( preemption_state ).disable_count;
    +           TL_GET( preemption_state ).disable_count -= 1;
                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, someone enabled interrupts that were already enabled
                if( prev == 1 ) {
    -                   preemption_state.enabled = true;
    +                   TL_GET( preemption_state ).enabled = true;
                }
        }
     
    @@ -236,6 +236,6 @@
     // If false : preemption is unsafe and marked as pending
     static inline bool preemption_ready() {
    -   bool ready = preemption_state.enabled && !preemption_state.in_progress; // Check if preemption is safe
    -   this_processor->pending_preemption = !ready;                        // Adjust the pending flag accordingly
    +   bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe
    +   TL_GET( this_processor )->pending_preemption = !ready;                  // Adjust the pending flag accordingly
        return ready;
     }
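
preemption_ready() cooperates with the pending_preemption flag: a timer signal that lands while preemption is unsafe is recorded on the processor rather than dropped, and the outermost enable_interrupts() replays it (the if( prev == 1 ) branch in the hunk above). A condensed standalone model of that hand-off, assuming the thread-local structure sketched earlier; this is illustrative, not the libcfa sources:

    #include <stdbool.h>

    struct processor { volatile bool pending_preemption; };

    static __thread struct {
            volatile bool           enabled;
            volatile unsigned short disable_count;
            volatile bool           in_progress;
    } preemption_state = { false, 1, false };

    static __thread struct processor * this_processor;  // must point at the running processor

    static inline bool preemption_ready( void ) {
            bool ready = preemption_state.enabled && ! preemption_state.in_progress;
            this_processor->pending_preemption = ! ready;   // remember a missed preemption
            return ready;
    }

    void enable_interrupts( void ) {
            unsigned short prev = preemption_state.disable_count;
            preemption_state.disable_count -= 1;
            if ( prev == 1 ) {                              // outermost enable
                    preemption_state.enabled = true;
                    if ( this_processor->pending_preemption ) {
                            this_processor->pending_preemption = false;
                            // replay the deferred switch here (BlockInternal in libcfa)
                    }
            }
    }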
     
    @@ -251,6 +251,6 @@
     
        // Start with preemption disabled until ready
    -   preemption_state.enabled = false;
    -   preemption_state.disable_count = 1;
    +   TL_GET( preemption_state ).enabled = false;
    +   TL_GET( preemption_state ).disable_count = 1;
     
        // Initialize the event kernel
     
    @@ -317,9 +317,9 @@
        // before the kernel thread has even started running. When that happens, an interrupt
        // with a null 'this_processor' will be caught; just ignore it.
    -   if(!this_processor) return;
    +   if(!TL_GET( this_processor )) return;
     
        choose(sfp->si_value.sival_int) {
                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    -           case PREEMPT_TERMINATE: verify(this_processor->do_terminate);
    +           case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
                default:
                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    @@ -331,11 +331,11 @@
        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
     
    -   preemption_state.in_progress = true;                      // Sync flag : prevent recursive calls to the signal handler
    +   TL_GET( preemption_state ).in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
        signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
    -   preemption_state.in_progress = false;                    // Clear the in progress flag
    +   TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag
     
        // Preemption can occur here
     
    -   BlockInternal( (thread_desc*)this_thread );        // Do the actual CtxSwitch
    +   BlockInternal( (thread_desc*)TL_GET( this_thread ) ); // Do the actual CtxSwitch
     }
     
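
The last hunk is the producer of in_progress: the flag is raised before SIGUSR1 is unblocked, so a nested delivery inside the handler fails preemption_ready() and is deferred through pending_preemption instead of recursing. A standalone sketch of the same guard, simplified to a direct flag check; the names here are hypothetical, and the real handler defers via preemption_ready() rather than testing the flag itself:

    #include <pthread.h>
    #include <signal.h>
    #include <stdbool.h>

    static __thread volatile bool in_progress = false;

    static void unblock_signal( int sig ) {          // helper: unblock one signal
            sigset_t mask;
            sigemptyset( &mask );
            sigaddset( &mask, sig );
            pthread_sigmask( SIG_UNBLOCK, &mask, NULL );
    }

    static void on_preempt( int sig ) {
            if ( in_progress ) return;               // nested delivery: bail out, stays pending
            in_progress = true;                      // claim the handler
            unblock_signal( sig );                   // later signals may now be delivered (and deferred)
            in_progress = false;                     // guard dropped before the switch
            // ... voluntary context switch off the handler happens here ...
    }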