Changeset a3821fa


Timestamp: Apr 24, 2021, 7:03:47 PM (3 years ago)
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 734908c
Parents: c6c7e6c
Message:

Changed enable_interrupts:

  • no longer saves the caller for debugging
  • now polls based on the parameter passed in
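
For orientation, a minimal sketch of the call-site migration this changeset performs. The declarations are taken from the diffs below; the surrounding function and its name are hypothetical and not part of the commit:

    extern void disable_interrupts() OPTIONAL_THREAD;
    extern void enable_interrupts( bool poll = true ) OPTIONAL_THREAD;   // single entry point replacing both old forms

    static void example( void ) {            // hypothetical caller, for illustration only
        disable_interrupts();
        // ... critical section ...
        enable_interrupts();                 // was: enable_interrupts( __cfaabi_dbg_ctx ); polls for pending preemption

        disable_interrupts();
        // ... critical section that must not yield on exit ...
        enable_interrupts( false );          // was: enable_interrupts_noPoll(); skips the poll
    }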
Location: libcfa/src
Files: 12 edited

Legend:

  old  new      unchanged line (both line numbers shown)
  old        -  removed line (old line number only)
       new   +  added line (new line number only)
  • libcfa/src/bits/locks.hfa

    r c6c7e6c → r a3821fa

      37   37      extern "C" {
      38   38          extern void disable_interrupts() OPTIONAL_THREAD;
      39         -     extern void enable_interrupts_noPoll() OPTIONAL_THREAD;
           39    +     extern void enable_interrupts( bool poll = true ) OPTIONAL_THREAD;
      40   40
      41   41          #ifdef __CFA_DEBUG__

      57   57                  __cfaabi_dbg_record_lock( this, caller );
      58   58              } else {
      59         -             enable_interrupts_noPoll();
           59    +             enable_interrupts( false );
      60   60              }
      61   61              return result;

      90   90      static inline void unlock( __spinlock_t & this ) {
      91   91          __atomic_clear( &this.lock, __ATOMIC_RELEASE );
      92         -     enable_interrupts_noPoll();
           92    +     enable_interrupts( false );
      93   93      }
      94   94  #endif
  • libcfa/src/concurrency/alarm.cfa

    r c6c7e6c → r a3821fa

     116  116      unlock( event_kernel->lock );
     117  117      this->set = true;
     118        -  enable_interrupts( __cfaabi_dbg_ctx );
          118   +  enable_interrupts();
     119  119  }
     120  120

     127  127      }
     128  128      unlock( event_kernel->lock );
     129        -  enable_interrupts( __cfaabi_dbg_ctx );
          129   +  enable_interrupts();
     130  130      this->set = false;
     131  131  }
  • libcfa/src/concurrency/clib/cfathread.cfa

    r c6c7e6c → r a3821fa

     117  117
     118  118      this_thrd->state = Ready;
     119        -  enable_interrupts( __cfaabi_dbg_ctx );
          119   +  enable_interrupts();
     120  120  }
     121  121
  • libcfa/src/concurrency/invoke.c

    r c6c7e6c → r a3821fa

      34   34
      35   35  extern void disable_interrupts() OPTIONAL_THREAD;
      36         - extern void enable_interrupts( __cfaabi_dbg_ctx_param );
           36    + extern void enable_interrupts( _Bool poll );
      37   37
      38   38  void __cfactx_invoke_coroutine(

      82   82  ) {
      83   83      // Officially start the thread by enabling preemption
      84         -     enable_interrupts( __cfaabi_dbg_ctx );
           84    +     enable_interrupts( true );
      85   85
      86   86      // Call the main of the thread
  • libcfa/src/concurrency/io.cfa

    r c6c7e6c → r a3821fa

     244  244              // Allocation was successful
     245  245              __STATS__( true, io.alloc.fast += 1; )
     246        -          enable_interrupts( __cfaabi_dbg_ctx );
          246   +          enable_interrupts();
     247  247
     248  248              __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);

     256  256          // Fast path failed, fallback on arbitration
     257  257          __STATS__( true, io.alloc.slow += 1; )
     258        -      enable_interrupts( __cfaabi_dbg_ctx );
          258   +      enable_interrupts();
     259  259
     260  260          $io_arbiter * ioarb = proc->cltr->io.arbiter;

     314  314              // Mark the instance as no longer in-use, re-enable interrupts and return
     315  315              __STATS__( true, io.submit.fast += 1; )
     316        -          enable_interrupts( __cfaabi_dbg_ctx );
          316   +          enable_interrupts();
     317  317
     318  318              __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");

     322  322          // Fast path failed, fallback on arbitration
     323  323          __STATS__( true, io.submit.slow += 1; )
     324        -      enable_interrupts( __cfaabi_dbg_ctx );
          324   +      enable_interrupts();
     325  325
     326  326          __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
  • libcfa/src/concurrency/kernel.cfa

    r c6c7e6c → r a3821fa

     126  126  extern void __disable_interrupts_hard();
     127  127  extern void __enable_interrupts_hard();
          128   +
          129   + static inline void __disable_interrupts_checked() {
          130   +     /* paranoid */ verify( __preemption_enabled() );
          131   +     disable_interrupts();
          132   +     /* paranoid */ verify( ! __preemption_enabled() );
          133   + }
          134   +
          135   + static inline void __enable_interrupts_checked( bool poll = true ) {
          136   +     /* paranoid */ verify( ! __preemption_enabled() );
          137   +     enable_interrupts( poll );
          138   +     /* paranoid */ verify( __preemption_enabled() );
          139   + }
     128  140
     129  141  //=============================================================================================

     517  529
     518  530      if(__must_unpark(thrd)) {
     519        -      /* paranoid */ verify( publicTLS_get(this_proc_id) );
     520  531          disable_interrupts();
     521        -
     522        -      /* paranoid */ verify( ! __preemption_enabled() );
     523        -
     524        -      // Wake lost the race,
     525        -      __schedule_thread( thrd );
     526        -
     527        -      /* paranoid */ verify( ! __preemption_enabled() );
     528        -
     529        -      enable_interrupts_noPoll();
     530        -      /* paranoid */ verify( publicTLS_get(this_proc_id) );
          532   +          // Wake lost the race,
          533   +          __schedule_thread( thrd );
          534   +      enable_interrupts(false);
     531  535      }
     532  536  }
     533  537
     534  538  void park( void ) {
     535        -  /* paranoid */ verify( __preemption_enabled() );
     536        -  disable_interrupts();
     537        -  /* paranoid */ verify( ! __preemption_enabled() );
     538        -  /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
     539        -
     540        -  returnToKernel();
     541        -
     542        -  /* paranoid */ verify( ! __preemption_enabled() );
     543        -  enable_interrupts( __cfaabi_dbg_ctx );
     544        -  /* paranoid */ verify( __preemption_enabled() );
          539   +  __disable_interrupts_checked();
          540   +      /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
          541   +      returnToKernel();
          542   +  __enable_interrupts_checked();
     545  543
     546  544  }

     582  580  // KERNEL ONLY
     583  581  bool force_yield( __Preemption_Reason reason ) {
     584        -  /* paranoid */ verify( __preemption_enabled() );
     585        -  disable_interrupts();
     586        -  /* paranoid */ verify( ! __preemption_enabled() );
     587        -
     588        -  $thread * thrd = kernelTLS().this_thread;
     589        -  /* paranoid */ verify(thrd->state == Active);
     590        -
     591        -  // SKULLDUGGERY: It is possible that we are preempting this thread just before
     592        -  // it was going to park itself. If that is the case and it is already using the
     593        -  // intrusive fields then we can't use them to preempt the thread
     594        -  // If that is the case, abandon the preemption.
     595        -  bool preempted = false;
     596        -  if(thrd->link.next == 0p) {
     597        -      preempted = true;
     598        -      thrd->preempted = reason;
     599        -      returnToKernel();
     600        -  }
     601        -
     602        -  /* paranoid */ verify( ! __preemption_enabled() );
     603        -  enable_interrupts_noPoll();
     604        -  /* paranoid */ verify( __preemption_enabled() );
     605        -
          582   +  __disable_interrupts_checked();
          583   +      $thread * thrd = kernelTLS().this_thread;
          584   +      /* paranoid */ verify(thrd->state == Active);
          585   +
          586   +      // SKULLDUGGERY: It is possible that we are preempting this thread just before
          587   +      // it was going to park itself. If that is the case and it is already using the
          588   +      // intrusive fields then we can't use them to preempt the thread
          589   +      // If that is the case, abandon the preemption.
          590   +      bool preempted = false;
          591   +      if(thrd->link.next == 0p) {
          592   +          preempted = true;
          593   +          thrd->preempted = reason;
          594   +          returnToKernel();
          595   +      }
          596   +  __enable_interrupts_checked( false );
     606  597      return preempted;
     607  598  }

     648  639      __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
     649  640
     650        -  disable_interrupts();
          641   +  __disable_interrupts_checked();
     651  642          /* paranoid */ verify( ! __preemption_enabled() );
     652  643          eventfd_t val;
     653  644          val = 1;
     654  645          eventfd_write( this->idle, val );
     655        -  enable_interrupts( __cfaabi_dbg_ctx );
          646   +  __enable_interrupts_checked();
     656  647  }
     657  648
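    The two checked helpers added above are thin wrappers that assert the preemption state on both sides of the plain calls, which is what lets the paranoid verifies at the call sites collapse into the wrapper. A minimal sketch of the intended usage pattern; the caller below is hypothetical, while verify() and the wrappers are the runtime's own symbols from the hunks above:

        static void example_checked( void ) {         // hypothetical, for illustration only
            __disable_interrupts_checked();            // verifies preemption was enabled, then disables it
                // ... work that must run with preemption disabled ...
            __enable_interrupts_checked( false );      // re-enables preemption without polling
        }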
  • libcfa/src/concurrency/kernel/fwd.hfa

    r c6c7e6c → r a3821fa

     108  108
     109  109      extern void disable_interrupts();
     110        -   extern void enable_interrupts_noPoll();
     111        -   extern void enable_interrupts( __cfaabi_dbg_ctx_param );
          110   +   extern void enable_interrupts( bool poll = false );
     112  111
     113  112      extern "Cforall" {

     403  402                          __VA_ARGS__ \
     404  403                      } \
     405        -                  if( !(in_kernel) ) enable_interrupts( __cfaabi_dbg_ctx ); \
          404   +                  if( !(in_kernel) ) enable_interrupts(); \
     406  405              }
     407  406          #else
  • libcfa/src/concurrency/kernel/startup.cfa

    r c6c7e6c → r a3821fa

     238  238
     239  239      /* paranoid */ verify( ! __preemption_enabled() );
     240        -  enable_interrupts( __cfaabi_dbg_ctx );
          240   +  enable_interrupts();
     241  241      /* paranoid */ verify( __preemption_enabled() );
     242  242

     530  530      disable_interrupts();
     531  531          init( this, name, _cltr, initT );
     532        -  enable_interrupts( __cfaabi_dbg_ctx );
          532   +  enable_interrupts();
     533  533
     534  534      __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);

     557  557      disable_interrupts();
     558  558          deinit( this );
     559        -  enable_interrupts( __cfaabi_dbg_ctx );
          559   +  enable_interrupts();
     560  560  }
     561  561

     595  595      // Unlock the RWlock
     596  596      ready_mutate_unlock( last_size );
     597        -  enable_interrupts_noPoll(); // Don't poll, could be in main cluster
          597   +  enable_interrupts( false ); // Don't poll, could be in main cluster
     598  598  }
     599  599

     610  610      // Unlock the RWlock
     611  611      ready_mutate_unlock( last_size );
     612        -  enable_interrupts_noPoll(); // Don't poll, could be in main cluster
          612   +  enable_interrupts( false ); // Don't poll, could be in main cluster
     613  613
     614  614  #if !defined(__CFA_NO_STATISTICS__)
  • libcfa/src/concurrency/kernel_private.hfa

    r c6c7e6c → r a3821fa

      29   29  extern "C" {
      30   30      void disable_interrupts() OPTIONAL_THREAD;
      31         -   void enable_interrupts_noPoll();
      32         -   void enable_interrupts( __cfaabi_dbg_ctx_param );
           31    +   void enable_interrupts( bool poll = true );
      33   32  }
      34   33
  • libcfa/src/concurrency/preemption.cfa

    r c6c7e6c → r a3821fa

     315  315      // Enable interrupts by decrementing the counter
     316  316      // If counter reaches 0, execute any pending __cfactx_switch
     317        -  void enable_interrupts( __cfaabi_dbg_ctx_param ) {
          317   +  void enable_interrupts( bool poll ) {
     318  318          // Cache the processor now since interrupts can start happening after the atomic store
     319  319          processor   * proc = __cfaabi_tls.this_processor;
     320        -      /* paranoid */ verify( proc );
          320   +      /* paranoid */ verify( !poll || proc );
     321  321
     322  322          with( __cfaabi_tls.preemption_state ){

     340  340                  // Signal the compiler that a fence is needed but only for signal handlers
     341  341                  __atomic_signal_fence(__ATOMIC_RELEASE);
     342        -              if( proc->pending_preemption ) {
          342   +              if( poll && proc->pending_preemption ) {
     343  343                      proc->pending_preemption = false;
     344  344                      force_yield( __POLL_PREEMPTION );
     345  345                  }
     346  346              }
     347        -      }
     348        -
     349        -      // For debugging purposes : keep track of the last person to enable the interrupts
     350        -      __cfaabi_dbg_debug_do( proc->last_enable = caller; )
     351        -  }
     352        -
     353        -  // Disable interrupts by incrementint the counter
     354        -  // Don't execute any pending __cfactx_switch even if counter reaches 0
     355        -  void enable_interrupts_noPoll() {
     356        -      unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
     357        -      __cfaabi_tls.preemption_state.disable_count -= 1;
     358        -      // If this triggers someone is enabled already enabled interrupts
     359        -      /* paranoid */ verifyf( prev != 0u, "Incremented from %u\n", prev );
     360        -      if( prev == 1 ) {
     361        -          #if GCC_VERSION > 50000
     362        -              static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
     363        -          #endif
     364        -          // Set enabled flag to true
     365        -          // should be atomic to avoid preemption in the middle of the operation.
     366        -          // use memory order RELAXED since there is no inter-thread on this variable requirements
     367        -          __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED);
     368        -
     369        -          // Signal the compiler that a fence is needed but only for signal handlers
     370        -          __atomic_signal_fence(__ATOMIC_RELEASE);
     371  347          }
     372  348      }
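    With the implementation above, the poll argument only controls whether a pending preemption is serviced at the moment interrupts are re-enabled; the pending flag itself is untouched when polling is skipped. A short sketch of the two behaviours, with hypothetical call sites based on the code in this hunk:

        disable_interrupts();
        // ... a preemption arrives here and is recorded in proc->pending_preemption ...
        enable_interrupts( true );    // disable_count reaches 0: the pending preemption is consumed
                                      // right away via force_yield( __POLL_PREEMPTION )

        disable_interrupts();
        // ... same scenario ...
        enable_interrupts( false );   // disable_count reaches 0, but the pending preemption is left
                                      // set and is serviced later (for example at the next polling enable)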
  • libcfa/src/concurrency/thread.cfa

    r c6c7e6c → r a3821fa

     135  135
     136  136      __schedule_thread( this_thrd );
     137        -  enable_interrupts( __cfaabi_dbg_ctx );
          137   +  enable_interrupts();
     138  138  }
     139  139

     168  168      disable_interrupts();
     169  169      uint64_t ret = __tls_rand();
     170        -  enable_interrupts( __cfaabi_dbg_ctx );
          170   +  enable_interrupts();
     171  171      return ret;
     172  172  }
  • libcfa/src/startup.cfa

    r c6c7e6c → r a3821fa

      39   39
      40   40      void disable_interrupts() __attribute__(( weak )) {}
      41         -   void enable_interrupts_noPoll() __attribute__(( weak )) {}
           41    +   void enable_interrupts() __attribute__(( weak )) {}
      42   42  } // extern "C"
      43   43