Ignore:
Timestamp:
May 18, 2018, 2:09:21 PM (7 years ago)
Author:
Aaron Moss <a3moss@…>
Branches:
new-env, with_gc
Children:
2472a19
Parents:
f6f0cca3 (diff), c7d8100c (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge remote-tracking branch 'origin/master' into with_gc

File:
1 edited

Legend:

Unmodified
Added
Removed
  • src/libcfa/concurrency/preemption.c

    rf6f0cca3 rff29f08  
    149149        // Disable interrupts by incrementing the counter
    150150        void disable_interrupts() {
    151                 TL_GET( preemption_state ).enabled = false;
    152                 __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1;
    153                 TL_GET( preemption_state ).disable_count = new_val;
    154                 verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
     151                with( kernelTLS.preemption_state ) {
     152                        enabled = false;
     153                        __attribute__((unused)) unsigned short new_val = disable_count + 1;
     154                        disable_count = new_val;
     155                        verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
     156                }
    155157        }
    156158
     
    158160        // If counter reaches 0, execute any pending CtxSwitch
    159161        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    160                 processor   * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add
    161                 thread_desc * thrd = TL_GET( this_thread );       // Cache the thread now since interrupts can start happening after the atomic add
    162 
    163                 unsigned short prev = TL_GET( preemption_state ).disable_count;
    164                 TL_GET( preemption_state ).disable_count -= 1;
    165                 verify( prev != 0u );                     // If this triggers, someone is enabling already enabled interrupts
    166 
    167                 // Check if we need to preempt the thread because an interrupt was missed
    168                 if( prev == 1 ) {
    169                         TL_GET( preemption_state ).enabled = true;
    170                         if( proc->pending_preemption ) {
    171                                 proc->pending_preemption = false;
    172                                 BlockInternal( thrd );
     162                processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
     163                thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic add
     164
     165                with( kernelTLS.preemption_state ){
     166                        unsigned short prev = disable_count;
     167                        disable_count -= 1;
     168                        verify( prev != 0u );                     // If this triggers, someone is enabling already enabled interrupts
     169
     170                        // Check if we need to preempt the thread because an interrupt was missed
     171                        if( prev == 1 ) {
     172                                enabled = true;
     173                                if( proc->pending_preemption ) {
     174                                        proc->pending_preemption = false;
     175                                        BlockInternal( thrd );
     176                                }
    173177                        }
    174178                }
     
    181185        // Don't execute any pending CtxSwitch even if counter reaches 0
    182186        void enable_interrupts_noPoll() {
    183                 unsigned short prev = TL_GET( preemption_state ).disable_count;
    184                 TL_GET( preemption_state ).disable_count -= 1;
     187                unsigned short prev = kernelTLS.preemption_state.disable_count;
     188                kernelTLS.preemption_state.disable_count -= 1;
    185189                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, someone is enabling already enabled interrupts
    186190                if( prev == 1 ) {
    187                         TL_GET( preemption_state ).enabled = true;
     191                        kernelTLS.preemption_state.enabled = true;
    188192                }
    189193        }
     
    230234}
    231235
    232 
     236// KERNEL ONLY
    233237// Check if a CtxSwitch signal handler should defer
    234238// If true  : preemption is safe
    235239// If false : preemption is unsafe and marked as pending
    236240static inline bool preemption_ready() {
    237         bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe
    238         TL_GET( this_processor )->pending_preemption = !ready;                  // Adjust the pending flag accordingly
     241        // Check if preemption is safe
     242        bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;
     243
     244        // Adjust the pending flag accordingly
     245        kernelTLS.this_processor->pending_preemption = !ready;
    239246        return ready;
    240247}
     
    250257
    251258        // Start with preemption disabled until ready
    252         TL_GET( preemption_state ).enabled = false;
    253         TL_GET( preemption_state ).disable_count = 1;
     259        kernelTLS.preemption_state.enabled = false;
     260        kernelTLS.preemption_state.disable_count = 1;
    254261
    255262        // Initialize the event kernel
     
    316323        // before the kernel thread has even started running. When that happens, an interrupt
    317324        // with a null 'this_processor' will be caught; just ignore it.
    318         if(!TL_GET( this_processor )) return;
     325        if(! kernelTLS.this_processor ) return;
    319326
    320327        choose(sfp->si_value.sival_int) {
    321328                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    322                 case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
     329                case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
    323330                default:
    324331                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    328335        if( !preemption_ready() ) { return; }
    329336
    330         __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
    331 
    332         TL_GET( preemption_state ).in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
    333         signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
    334         TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag
     337        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread );
     338
     339        // Sync flag : prevent recursive calls to the signal handler
     340        kernelTLS.preemption_state.in_progress = true;
     341
     342        // We are about to CtxSwitch out of the signal handler, let other handlers in
     343        signal_unblock( SIGUSR1 );
     344
     345        // TODO: this should go in finish action
     346        // Clear the in progress flag
     347        kernelTLS.preemption_state.in_progress = false;
    335348
    336349        // Preemption can occur here
    337350
    338         BlockInternal( (thread_desc*)TL_GET( this_thread ) ); // Do the actual CtxSwitch
     351        BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
    339352}
    340353
     
    344357        // Block sigalrms to control when they arrive
    345358        sigset_t mask;
     359        sigfillset(&mask);
     360        if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     361            abort( "internal error, pthread_sigmask" );
     362        }
     363
    346364        sigemptyset( &mask );
    347365        sigaddset( &mask, SIGALRM );
    348 
    349         if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
    350             abort( "internal error, pthread_sigmask" );
    351         }
    352366
    353367        // Main loop
     
    400414}
    401415
     416//=============================================================================================
     417// Kernel Signal Debug
     418//=============================================================================================
     419
     420void __cfaabi_check_preemption() {
     421        bool ready = kernelTLS.preemption_state.enabled;
     422        if(!ready) { abort("Preemption should be ready"); }
     423
     424        sigset_t oldset;
     425        int ret;
     426        ret = sigprocmask(0, NULL, &oldset);
     427        if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
     428
     429        ret = sigismember(&oldset, SIGUSR1);
     430        if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
     431
     432        if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
     433}
     434
    402435// Local Variables: //
    403436// mode: c //
Note: See TracChangeset for help on using the changeset viewer.