Ignore:
Timestamp:
Apr 24, 2021, 7:45:02 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
fb0be05
Parents:
89eff25 (diff), 254ad1b (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
libcfa/src/concurrency
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/alarm.cfa

    r89eff25 rcfff639  
    116116        unlock( event_kernel->lock );
    117117        this->set = true;
    118         enable_interrupts( __cfaabi_dbg_ctx );
     118        enable_interrupts();
    119119}
    120120
     
    127127        }
    128128        unlock( event_kernel->lock );
    129         enable_interrupts( __cfaabi_dbg_ctx );
     129        enable_interrupts();
    130130        this->set = false;
    131131}
  • libcfa/src/concurrency/clib/cfathread.cfa

    r89eff25 rcfff639  
    117117
    118118        this_thrd->state = Ready;
    119         enable_interrupts( __cfaabi_dbg_ctx );
     119        enable_interrupts();
    120120}
    121121
  • libcfa/src/concurrency/invoke.c

    r89eff25 rcfff639  
    3434
    3535extern void disable_interrupts() OPTIONAL_THREAD;
    36 extern void enable_interrupts( __cfaabi_dbg_ctx_param );
     36extern void enable_interrupts( _Bool poll );
    3737
    3838void __cfactx_invoke_coroutine(
     
    8282) {
    8383        // Officially start the thread by enabling preemption
    84         enable_interrupts( __cfaabi_dbg_ctx );
     84        enable_interrupts( true );
    8585
    8686        // Call the main of the thread
  • libcfa/src/concurrency/io.cfa

    r89eff25 rcfff639  
    244244                        // Allocation was successful
    245245                        __STATS__( true, io.alloc.fast += 1; )
    246                         enable_interrupts( __cfaabi_dbg_ctx );
     246                        enable_interrupts();
    247247
    248248                        __cfadbg_print_safe(io, "Kernel I/O : fast allocation successful from ring %d\n", ctx->fd);
     
    256256                // Fast path failed, fallback on arbitration
    257257                __STATS__( true, io.alloc.slow += 1; )
    258                 enable_interrupts( __cfaabi_dbg_ctx );
     258                enable_interrupts();
    259259
    260260                $io_arbiter * ioarb = proc->cltr->io.arbiter;
     
    314314                        // Mark the instance as no longer in-use, re-enable interrupts and return
    315315                        __STATS__( true, io.submit.fast += 1; )
    316                         enable_interrupts( __cfaabi_dbg_ctx );
     316                        enable_interrupts();
    317317
    318318                        __cfadbg_print_safe(io, "Kernel I/O : submitted on fast path\n");
     
    322322                // Fast path failed, fallback on arbitration
    323323                __STATS__( true, io.submit.slow += 1; )
    324                 enable_interrupts( __cfaabi_dbg_ctx );
     324                enable_interrupts();
    325325
    326326                __cfadbg_print_safe(io, "Kernel I/O : falling back on arbiter for submission\n");
  • libcfa/src/concurrency/kernel.cfa

    r89eff25 rcfff639  
    115115static $thread * __next_thread(cluster * this);
    116116static $thread * __next_thread_slow(cluster * this);
     117static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
    117118static void __run_thread(processor * this, $thread * dst);
    118119static void __wake_one(cluster * cltr);
     
    130131extern void __disable_interrupts_hard();
    131132extern void __enable_interrupts_hard();
     133
     134static inline void __disable_interrupts_checked() {
     135        /* paranoid */ verify( __preemption_enabled() );
     136        disable_interrupts();
     137        /* paranoid */ verify( ! __preemption_enabled() );
     138}
     139
     140static inline void __enable_interrupts_checked( bool poll = true ) {
     141        /* paranoid */ verify( ! __preemption_enabled() );
     142        enable_interrupts( poll );
     143        /* paranoid */ verify( __preemption_enabled() );
     144}
    132145
    133146//=============================================================================================
     
    452465                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
    453466                        // The thread was preempted, reschedule it and reset the flag
    454                         __schedule_thread( thrd_dst );
     467                        schedule_thread$( thrd_dst );
    455468                        break RUNNING;
    456469                }
     
    541554        /* paranoid */ verify( ! __preemption_enabled() );
    542555        /* paranoid */ verify( kernelTLS().this_proc_id );
     556        /* paranoid */ verify( ready_schedule_islocked());
    543557        /* paranoid */ verify( thrd );
    544558        /* paranoid */ verify( thrd->state != Halted );
     
    560574        __STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )
    561575
    562         ready_schedule_lock();
    563                 // push the thread to the cluster ready-queue
    564                 push( cl, thrd );
    565 
    566                 // variable thrd is no longer safe to use
    567 
    568                 // wake the cluster using the save variable.
    569                 __wake_one( cl );
    570         ready_schedule_unlock();
     576        // push the thread to the cluster ready-queue
     577        push( cl, thrd );
     578
     579        // variable thrd is no longer safe to use
     580        thrd = 0xdeaddeaddeaddeadp;
     581
     582        // wake the cluster using the save variable.
     583        __wake_one( cl );
    571584
    572585        #if !defined(__CFA_NO_STATISTICS__)
     
    585598        #endif
    586599
    587         /* paranoid */ verify( ! __preemption_enabled() );
     600        /* paranoid */ verify( ready_schedule_islocked());
     601        /* paranoid */ verify( ! __preemption_enabled() );
     602}
     603
     604void schedule_thread$( $thread * thrd ) {
     605        ready_schedule_lock();
     606                __schedule_thread( thrd );
     607        ready_schedule_unlock();
    588608}
    589609
     
    623643}
    624644
    625 void unpark( $thread * thrd ) {
    626         if( !thrd ) return;
    627 
     645static inline bool __must_unpark( $thread * thrd ) {
    628646        int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
    629647        switch(old_ticket) {
    630648                case TICKET_RUNNING:
    631649                        // Wake won the race, the thread will reschedule/rerun itself
    632                         break;
     650                        return false;
    633651                case TICKET_BLOCKED:
    634652                        /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
    635653                        /* paranoid */ verify( thrd->state == Blocked );
    636 
    637                         {
    638                                 /* paranoid */ verify( publicTLS_get(this_proc_id) );
    639                                 disable_interrupts();
    640 
    641                                 /* paranoid */ verify( ! __preemption_enabled() );
    642 
    643                                 // Wake lost the race,
    644                                 __schedule_thread( thrd );
    645 
    646                                 /* paranoid */ verify( ! __preemption_enabled() );
    647 
    648                                 enable_interrupts_noPoll();
    649                                 /* paranoid */ verify( publicTLS_get(this_proc_id) );
    650                         }
    651 
    652                         break;
     654                        return true;
    653655                default:
    654656                        // This makes no sense, something is wrong abort
     
    657659}
    658660
     661void unpark( $thread * thrd ) {
     662        if( !thrd ) return;
     663
     664        if(__must_unpark(thrd)) {
     665                disable_interrupts();
     666                        // Wake lost the race,
     667                        schedule_thread$( thrd );
     668                enable_interrupts(false);
     669        }
     670}
     671
    659672void park( void ) {
    660         /* paranoid */ verify( __preemption_enabled() );
    661         disable_interrupts();
    662         /* paranoid */ verify( ! __preemption_enabled() );
    663         /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
    664 
    665         returnToKernel();
    666 
    667         /* paranoid */ verify( ! __preemption_enabled() );
    668         enable_interrupts( __cfaabi_dbg_ctx );
    669         /* paranoid */ verify( __preemption_enabled() );
     673        __disable_interrupts_checked();
     674                /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
     675                returnToKernel();
     676        __enable_interrupts_checked();
    670677
    671678}
     
    707714// KERNEL ONLY
    708715bool force_yield( __Preemption_Reason reason ) {
    709         /* paranoid */ verify( __preemption_enabled() );
    710         disable_interrupts();
    711         /* paranoid */ verify( ! __preemption_enabled() );
    712 
    713         $thread * thrd = kernelTLS().this_thread;
    714         /* paranoid */ verify(thrd->state == Active);
    715 
    716         // SKULLDUGGERY: It is possible that we are preempting this thread just before
    717         // it was going to park itself. If that is the case and it is already using the
    718         // intrusive fields then we can't use them to preempt the thread
    719         // If that is the case, abandon the preemption.
    720         bool preempted = false;
    721         if(thrd->link.next == 0p) {
    722                 preempted = true;
    723                 thrd->preempted = reason;
    724                 returnToKernel();
    725         }
    726 
    727         /* paranoid */ verify( ! __preemption_enabled() );
    728         enable_interrupts_noPoll();
    729         /* paranoid */ verify( __preemption_enabled() );
    730 
     716        __disable_interrupts_checked();
     717                $thread * thrd = kernelTLS().this_thread;
     718                /* paranoid */ verify(thrd->state == Active);
     719
     720                // SKULLDUGGERY: It is possible that we are preempting this thread just before
     721                // it was going to park itself. If that is the case and it is already using the
     722                // intrusive fields then we can't use them to preempt the thread
     723                // If that is the case, abandon the preemption.
     724                bool preempted = false;
     725                if(thrd->link.next == 0p) {
     726                        preempted = true;
     727                        thrd->preempted = reason;
     728                        returnToKernel();
     729                }
     730        __enable_interrupts_checked( false );
    731731        return preempted;
    732732}
     
    773773        __cfadbg_print_safe(runtime_core, "Kernel : waking Processor %p\n", this);
    774774
    775         disable_interrupts();
     775        __disable_interrupts_checked();
    776776                /* paranoid */ verify( ! __preemption_enabled() );
    777777                eventfd_t val;
    778778                val = 1;
    779779                eventfd_write( this->idle, val );
    780         enable_interrupts( __cfaabi_dbg_ctx );
     780        __enable_interrupts_checked();
    781781}
    782782
  • libcfa/src/concurrency/kernel/fwd.hfa

    r89eff25 rcfff639  
    108108
    109109        extern void disable_interrupts();
    110         extern void enable_interrupts_noPoll();
    111         extern void enable_interrupts( __cfaabi_dbg_ctx_param );
     110        extern void enable_interrupts( bool poll = false );
    112111
    113112        extern "Cforall" {
     
    403402                                        __VA_ARGS__ \
    404403                                } \
    405                                 if( !(in_kernel) ) enable_interrupts( __cfaabi_dbg_ctx ); \
     404                                if( !(in_kernel) ) enable_interrupts(); \
    406405                        }
    407406                #else
  • libcfa/src/concurrency/kernel/startup.cfa

    r89eff25 rcfff639  
    225225        // Add the main thread to the ready queue
    226226        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    227         __schedule_thread(mainThread);
     227        schedule_thread$(mainThread);
    228228
    229229        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
     
    238238
    239239        /* paranoid */ verify( ! __preemption_enabled() );
    240         enable_interrupts( __cfaabi_dbg_ctx );
     240        enable_interrupts();
    241241        /* paranoid */ verify( __preemption_enabled() );
    242242
     
    532532        disable_interrupts();
    533533                init( this, name, _cltr, initT );
    534         enable_interrupts( __cfaabi_dbg_ctx );
     534        enable_interrupts();
    535535
    536536        __cfadbg_print_safe(runtime_core, "Kernel : Starting core %p\n", &this);
     
    559559        disable_interrupts();
    560560                deinit( this );
    561         enable_interrupts( __cfaabi_dbg_ctx );
     561        enable_interrupts();
    562562}
    563563
     
    597597        // Unlock the RWlock
    598598        ready_mutate_unlock( last_size );
    599         enable_interrupts_noPoll(); // Don't poll, could be in main cluster
     599        enable_interrupts( false ); // Don't poll, could be in main cluster
    600600}
    601601
     
    612612        // Unlock the RWlock
    613613        ready_mutate_unlock( last_size );
    614         enable_interrupts_noPoll(); // Don't poll, could be in main cluster
     614        enable_interrupts( false ); // Don't poll, could be in main cluster
    615615
    616616        #if !defined(__CFA_NO_STATISTICS__)
  • libcfa/src/concurrency/kernel_private.hfa

    r89eff25 rcfff639  
    2929extern "C" {
    3030        void disable_interrupts() OPTIONAL_THREAD;
    31         void enable_interrupts_noPoll();
    32         void enable_interrupts( __cfaabi_dbg_ctx_param );
    33 }
    34 
    35 void __schedule_thread( $thread * )
    36 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
    37         __attribute__((nonnull (1)))
    38 #endif
    39 ;
     31        void enable_interrupts( bool poll = true );
     32}
     33
     34void schedule_thread$( $thread * ) __attribute__((nonnull (1)));
    4035
    4136extern bool __preemption_enabled();
  • libcfa/src/concurrency/preemption.cfa

    r89eff25 rcfff639  
    315315        // Enable interrupts by decrementing the counter
    316316        // If counter reaches 0, execute any pending __cfactx_switch
    317         void enable_interrupts( __cfaabi_dbg_ctx_param ) {
     317        void enable_interrupts( bool poll ) {
    318318                // Cache the processor now since interrupts can start happening after the atomic store
    319319                processor   * proc = __cfaabi_tls.this_processor;
    320                 /* paranoid */ verify( proc );
     320                /* paranoid */ verify( !poll || proc );
    321321
    322322                with( __cfaabi_tls.preemption_state ){
     
    340340                                // Signal the compiler that a fence is needed but only for signal handlers
    341341                                __atomic_signal_fence(__ATOMIC_RELEASE);
    342                                 if( proc->pending_preemption ) {
     342                                if( poll && proc->pending_preemption ) {
    343343                                        proc->pending_preemption = false;
    344344                                        force_yield( __POLL_PREEMPTION );
    345345                                }
    346346                        }
    347                 }
    348 
    349                 // For debugging purposes : keep track of the last person to enable the interrupts
    350                 __cfaabi_dbg_debug_do( proc->last_enable = caller; )
    351         }
    352 
    353         // Enable interrupts by decrementing the counter
    354         // Don't execute any pending __cfactx_switch even if counter reaches 0
    355         void enable_interrupts_noPoll() {
    356                 unsigned short prev = __cfaabi_tls.preemption_state.disable_count;
    357                 __cfaabi_tls.preemption_state.disable_count -= 1;
    358                 // If this triggers, someone already enabled interrupts
    359                 /* paranoid */ verifyf( prev != 0u, "Incremented from %u\n", prev );
    360                 if( prev == 1 ) {
    361                         #if GCC_VERSION > 50000
    362                                 static_assert(__atomic_always_lock_free(sizeof(__cfaabi_tls.preemption_state.enabled), &__cfaabi_tls.preemption_state.enabled), "Must be lock-free");
    363                         #endif
    364                         // Set enabled flag to true
    365                         // should be atomic to avoid preemption in the middle of the operation.
    366                         // use memory order RELAXED since there is no inter-thread on this variable requirements
    367                         __atomic_store_n(&__cfaabi_tls.preemption_state.enabled, true, __ATOMIC_RELAXED);
    368 
    369                         // Signal the compiler that a fence is needed but only for signal handlers
    370                         __atomic_signal_fence(__ATOMIC_RELEASE);
    371347                }
    372348        }
  • libcfa/src/concurrency/thread.cfa

    r89eff25 rcfff639  
    136136        /* paranoid */ verify( this_thrd->context.SP );
    137137
    138         __schedule_thread( this_thrd );
    139         enable_interrupts( __cfaabi_dbg_ctx );
     138        schedule_thread$( this_thrd );
     139        enable_interrupts();
    140140}
    141141
     
    170170        disable_interrupts();
    171171        uint64_t ret = __tls_rand();
    172         enable_interrupts( __cfaabi_dbg_ctx );
     172        enable_interrupts();
    173173        return ret;
    174174}
Note: See TracChangeset for help on using the changeset viewer.