Changeset e873838


Ignore:
Timestamp:
Nov 2, 2020, 12:44:43 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
58688bf, 82f791f
Parents:
f7136f7
Message:

Removed __unpark and added support for unpark from the kernel (removing the distinction between the two).

Location:
libcfa/src/concurrency
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/io/setup.cfa

    rf7136f7 re873838  
    149149                id.full_proc = false;
    150150                id.id = doregister(&id);
     151                kernelTLS.this_proc_id = &id;
    151152                __cfaabi_dbg_print_safe( "Kernel : IO poller thread starting\n" );
    152153
     
    180181                                        kernelTLS.this_stats = io_ctx->self.curr_cluster->stats;
    181182                                #endif
    182                                 __post( io_ctx->sem, &id );
     183                                post( io_ctx->sem );
    183184                        }
    184185                }
     
    235236                        if( thrd.state == Ready || thrd.preempted != __NO_PREEMPTION ) {
    236237
    237                                 ready_schedule_lock( (struct __processor_id_t *)active_processor() );
     238                                ready_schedule_lock();
    238239
    239240                                        // This is the tricky case
     
    253254                                        thrd.preempted = __NO_PREEMPTION;
    254255
    255                                 ready_schedule_unlock( (struct __processor_id_t *)active_processor() );
     256                                ready_schedule_unlock();
    256257
    257258                                // Pretend like the thread was blocked all along
  • libcfa/src/concurrency/kernel.cfa

    rf7136f7 re873838  
    108108static $thread * __next_thread_slow(cluster * this);
    109109static void __run_thread(processor * this, $thread * dst);
    110 static void __wake_one(struct __processor_id_t * id, cluster * cltr);
     110static void __wake_one(cluster * cltr);
    111111
    112112static void push  (__cluster_idles & idles, processor & proc);
     
    282282                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
    283283                        // The thread was preempted, reschedule it and reset the flag
    284                         __schedule_thread( (__processor_id_t*)this, thrd_dst );
     284                        __schedule_thread( thrd_dst );
    285285                        break RUNNING;
    286286                }
     
    358358// Scheduler routines
    359359// KERNEL ONLY
    360 void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
     360void __schedule_thread( $thread * thrd ) {
    361361        /* paranoid */ verify( thrd );
    362362        /* paranoid */ verify( thrd->state != Halted );
    363363        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     364        /* paranoid */ verify( kernelTLS.this_proc_id );
    364365        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
    365366        /* paranoid */  if( thrd->state == Blocked || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
     
    374375        if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
    375376
    376         ready_schedule_lock  ( id );
     377        ready_schedule_lock();
    377378                push( thrd->curr_cluster, thrd );
    378                 __wake_one(id, thrd->curr_cluster);
    379         ready_schedule_unlock( id );
     379                __wake_one(thrd->curr_cluster);
     380        ready_schedule_unlock();
    380381
    381382        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     
    384385// KERNEL ONLY
    385386static inline $thread * __next_thread(cluster * this) with( *this ) {
    386         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    387 
    388         ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
     387        /* paranoid */ verify( kernelTLS.this_proc_id );
     388        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     389
     390        ready_schedule_lock();
    389391                $thread * thrd = pop( this );
    390         ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
    391 
    392         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     392        ready_schedule_unlock();
     393
     394        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     395        /* paranoid */ verify( kernelTLS.this_proc_id );
    393396        return thrd;
    394397}
     
    396399// KERNEL ONLY
    397400static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
    398         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    399 
    400         ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
     401        /* paranoid */ verify( kernelTLS.this_proc_id );
     402        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     403
     404        ready_schedule_lock();
    401405                $thread * thrd = pop_slow( this );
    402         ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );
    403 
    404         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     406        ready_schedule_unlock();
     407
     408        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     409        /* paranoid */ verify( kernelTLS.this_proc_id );
    405410        return thrd;
    406411}
    407412
    408 // KERNEL ONLY unpark with out disabling interrupts
    409 void __unpark(  struct __processor_id_t * id, $thread * thrd ) {
     413void unpark( $thread * thrd ) {
     414        if( !thrd ) return;
     415
     416        /* paranoid */ verify( kernelTLS.this_proc_id );
     417        bool full = kernelTLS.this_proc_id->full_proc;
     418        if(full) disable_interrupts();
     419
     420        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    410421        int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
    411422        switch(old_ticket) {
     
    418429
    419430                        // Wake lost the race,
    420                         __schedule_thread( id, thrd );
     431                        __schedule_thread( thrd );
    421432                        break;
    422433                default:
     
    424435                        abort("Thread %p (%s) has mismatch park/unpark\n", thrd, thrd->self_cor.name);
    425436        }
    426 }
    427 
    428 void unpark( $thread * thrd ) {
    429         if( !thrd ) return;
    430 
    431         disable_interrupts();
    432         __unpark( (__processor_id_t*)kernelTLS.this_processor, thrd );
    433         enable_interrupts( __cfaabi_dbg_ctx );
     437        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     438
     439        if(full) enable_interrupts( __cfaabi_dbg_ctx );
     440        /* paranoid */ verify( kernelTLS.this_proc_id );
    434441}
    435442
     
    505512//=============================================================================================
    506513// Wake a thread from the front if there are any
    507 static void __wake_one(struct __processor_id_t * id, cluster * this) {
    508         /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    509         /* paranoid */ verify( ready_schedule_islocked( id ) );
     514static void __wake_one(cluster * this) {
     515        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     516        /* paranoid */ verify( ready_schedule_islocked() );
    510517
    511518        // Check if there is a sleeping processor
     
    525532        #endif
    526533
    527         /* paranoid */ verify( ready_schedule_islocked( id ) );
     534        /* paranoid */ verify( ready_schedule_islocked() );
    528535        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    529536
  • libcfa/src/concurrency/kernel/fwd.hfa

    rf7136f7 re873838  
    3535        extern "Cforall" {
    3636                extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
    37                         struct $thread    * volatile this_thread;
    38                         struct processor  * volatile this_processor;
    39                         struct __stats_t  * volatile this_stats;
     37                        struct $thread          * volatile this_thread;
     38                        struct processor        * volatile this_processor;
     39                        struct __processor_id_t * volatile this_proc_id;
     40                        struct __stats_t        * volatile this_stats;
    4041
    4142                        struct {
  • libcfa/src/concurrency/kernel/startup.cfa

    rf7136f7 re873838  
    122122        NULL,
    123123        NULL,
     124        NULL,
    124125        { 1, false, false },
    125126};
     
    212213        //initialize the global state variables
    213214        kernelTLS.this_processor = mainProcessor;
     215        kernelTLS.this_proc_id   = (__processor_id_t*)mainProcessor;
    214216        kernelTLS.this_thread    = mainThread;
    215217
     
    227229        // Add the main thread to the ready queue
    228230        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    229         __schedule_thread((__processor_id_t *)mainProcessor, mainThread);
     231        __schedule_thread(mainThread);
    230232
    231233        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
     
    324326        processor * proc = (processor *) arg;
    325327        kernelTLS.this_processor = proc;
     328        kernelTLS.this_proc_id   = (__processor_id_t*)proc;
    326329        kernelTLS.this_thread    = 0p;
    327330        kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
  • libcfa/src/concurrency/kernel_private.hfa

    rf7136f7 re873838  
    3333}
    3434
    35 void __schedule_thread( struct __processor_id_t *, $thread * )
     35void __schedule_thread( $thread * )
    3636#if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
    37         __attribute__((nonnull (2)))
     37        __attribute__((nonnull (1)))
    3838#endif
    3939;
     
    6363)
    6464
    65 // KERNEL ONLY unpark with out disabling interrupts
    66 void __unpark( struct __processor_id_t *, $thread * thrd );
    67 
    6865#define TICKET_BLOCKED (-1) // thread is blocked
    6966#define TICKET_RUNNING ( 0) // thread is running
    7067#define TICKET_UNBLOCK ( 1) // thread should ignore next block
    71 
    72 static inline bool __post(single_sem & this, struct __processor_id_t * id) {
    73         for() {
    74                 struct $thread * expected = this.ptr;
    75                 if(expected == 1p) return false;
    76                 if(expected == 0p) {
    77                         if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    78                                 return false;
    79                         }
    80                 }
    81                 else {
    82                         if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    83                                 __unpark( id, expected );
    84                                 return true;
    85                         }
    86                 }
    87         }
    88 }
    8968
    9069//-----------------------------------------------------------------------------
     
    201180// Reader side : acquire when using the ready queue to schedule but not
    202181//  creating/destroying queues
    203 static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
    204         unsigned iproc = proc->id;
    205         /*paranoid*/ verify(data[iproc].handle == proc);
     182static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
     183        /*paranoid*/ verify( kernelTLS.this_proc_id );
     184
     185        unsigned iproc = kernelTLS.this_proc_id->id;
     186        /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
    206187        /*paranoid*/ verify(iproc < ready);
    207188
     
    225206}
    226207
    227 static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
    228         unsigned iproc = proc->id;
    229         /*paranoid*/ verify(data[iproc].handle == proc);
     208static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
     209        /*paranoid*/ verify( kernelTLS.this_proc_id );
     210
     211        unsigned iproc = kernelTLS.this_proc_id->id;
     212        /*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
    230213        /*paranoid*/ verify(iproc < ready);
    231214        /*paranoid*/ verify(data[iproc].lock);
     
    239222
    240223#ifdef __CFA_WITH_VERIFY__
    241         static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
     224        static inline bool ready_schedule_islocked(void) {
     225                /*paranoid*/ verify( kernelTLS.this_proc_id );
     226                __processor_id_t * proc = kernelTLS.this_proc_id;
    242227                return __scheduler_lock->data[proc->id].owned;
    243228        }
  • libcfa/src/concurrency/preemption.cfa

    rf7136f7 re873838  
    3838// FwdDeclarations : timeout handlers
    3939static void preempt( processor   * this );
    40 static void timeout( struct __processor_id_t * id, $thread * this );
     40static void timeout( $thread * this );
    4141
    4242// FwdDeclarations : Signal handlers
     
    9191
    9292// Tick one frame of the Discrete Event Simulation for alarms
    93 static void tick_preemption( struct __processor_id_t * id ) {
     93static void tick_preemption(void) {
    9494        alarm_node_t * node = 0p;                                                       // Used in the while loop but cannot be declared in the while condition
    9595        alarm_list_t * alarms = &event_kernel->alarms;          // Local copy for ease of reading
     
    109109                }
    110110                else {
    111                         timeout( id, node->thrd );
     111                        timeout( node->thrd );
    112112                }
    113113
     
    270270
    271271// reserved for future use
    272 static void timeout( struct __processor_id_t * id, $thread * this ) {
     272static void timeout( $thread * this ) {
    273273        #if !defined( __CFA_NO_STATISTICS__ )
    274274                kernelTLS.this_stats = this->curr_cluster->stats;
    275275        #endif
    276         __unpark( id, this );
     276        unpark( this );
    277277}
    278278
     
    413413        id.full_proc = false;
    414414        id.id = doregister(&id);
     415        kernelTLS.this_proc_id = &id;
    415416
    416417        // Block sigalrms to control when they arrive
     
    458459                        // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
    459460                        lock( event_kernel->lock __cfaabi_dbg_ctx2 );
    460                         tick_preemption( &id );
     461                        tick_preemption();
    461462                        unlock( event_kernel->lock );
    462463                        break;
  • libcfa/src/concurrency/thread.cfa

    rf7136f7 re873838  
    127127        verify( this_thrd->context.SP );
    128128
    129         __schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd);
     129        __schedule_thread( this_thrd );
    130130        enable_interrupts( __cfaabi_dbg_ctx );
    131131}
Note: See TracChangeset for help on using the changeset viewer.