Changeset b0c7419 for libcfa/src


Ignore:
Timestamp:
Feb 14, 2020, 3:28:17 PM (5 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
8c50aed
Parents:
50b8885
Message:

Yield now uses force_yield instead of park/unpark.
Final ctxswitch of a thread now uses ad-hoc mechanism instead of park/unpark.

Location:
libcfa/src/concurrency
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/invoke.h

    r50b8885 rb0c7419  
    9292        };
    9393
    94         enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun, Reschedule };
    95         enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION };
     94        enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };
     95        enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };
    9696
    9797        struct coroutine_desc {
  • libcfa/src/concurrency/kernel.cfa

    r50b8885 rb0c7419  
    213213        this.cltr = &cltr;
    214214        terminated{ 0 };
     215        destroyer = 0p;
    215216        do_terminate = false;
    216217        preemption_alarm = 0p;
     
    320321        coroutine_desc * proc_cor = get_coroutine(this->runner);
    321322
    322         // Reset the terminating actions here
    323         this->finish.action_code = No_Action;
    324 
    325323        // Update global state
    326324        kernelTLS.this_thread = thrd_dst;
     
    334332                if(unlikely(thrd_dst->preempted)) {
    335333                        thrd_dst->preempted = __NO_PREEMPTION;
    336                         verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
     334                        verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
    337335                } else {
    338336                        verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
     
    372370                                // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
    373371                                thrd_dst->state = Halted;
     372
     373                                // We may need to wake someone up here since another thread may be parked waiting to destroy this one
     374                                unpark( this->destroyer );
     375                                this->destroyer = 0p;
    374376                                break RUNNING;
    375377                        case Active:
     
    380382                                // In this case, just run it again.
    381383                                continue RUNNING;
    382                         case Reschedule:
    383                                 // This is case 3, someone tried to run this before it finished blocking
    384                                 // but it must go through the ready-queue
    385                                 thrd_dst->state = Inactive;  /*restore invariant */
    386                                 ScheduleThread( thrd_dst );
    387                                 break RUNNING;
    388384                        default:
    389385                                // This makes no sense, something is wrong abort
     
    397393
    398394// KERNEL_ONLY
    399 static void returnToKernel() {
     395void returnToKernel() {
    400396        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    401397        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
     
    553549        /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
    554550                          "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
    555         /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
     551        /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
    556552                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    557553        /* paranoid */ #endif
     
    589585}
    590586
    591 void unpark( thread_desc * thrd, bool must_yield ) {
     587void unpark( thread_desc * thrd ) {
    592588        if( !thrd ) return;
    593 
    594         enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
    595589
    596590        disable_interrupts();
    597591        static_assert(sizeof(thrd->state) == sizeof(int));
    598         enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
     592        enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
    599593        switch(old_state) {
    600594                case Active:
     
    609603                        break;
    610604                case Rerun:
    611                 case Reschedule:
    612605                        abort("More than one thread attempted to schedule thread %p\n", thrd);
    613606                        break;
     
    637630
    638631// KERNEL ONLY
    639 void LeaveThread() {
     632void __leave_thread() {
    640633        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    641634        returnToKernel();
     635        abort();
    642636}
    643637
     
    649643
    650644        thread_desc * thrd = kernelTLS.this_thread;
    651         /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
     645        /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);
    652646
    653647        // SKULLDUGGERY: It is possible that we are preempting this thread just before
  • libcfa/src/concurrency/kernel.hfa

    r50b8885 rb0c7419  
    4545extern struct cluster * mainCluster;
    4646
    47 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
    48 
    49 typedef void (*__finish_callback_fptr_t)(void);
    50 
    51 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
    52 struct FinishAction {
    53         FinishOpCode action_code;
    54         /*
    55         // Union of possible actions
    56         union {
    57                 // Option 1 : locks and threads
    58                 struct {
    59                         // 1 thread or N thread
    60                         union {
    61                                 thread_desc * thrd;
    62                                 struct {
    63                                         thread_desc ** thrds;
    64                                         unsigned short thrd_count;
    65                                 };
    66                         };
    67                         // 1 lock or N lock
    68                         union {
    69                                 __spinlock_t * lock;
    70                                 struct {
    71                                         __spinlock_t ** locks;
    72                                         unsigned short lock_count;
    73                                 };
    74                         };
    75                 };
    76                 // Option 2 : action pointer
    77                 __finish_callback_fptr_t callback;
    78         };
    79         /*/
    80         thread_desc * thrd;
    81         thread_desc ** thrds;
    82         unsigned short thrd_count;
    83         __spinlock_t * lock;
    84         __spinlock_t ** locks;
    85         unsigned short lock_count;
    86         __finish_callback_fptr_t callback;
    87         //*/
    88 };
    89 static inline void ?{}(FinishAction & this) {
    90         this.action_code = No_Action;
    91         this.thrd = 0p;
    92         this.lock = 0p;
    93 }
    94 static inline void ^?{}(FinishAction &) {}
    95 
    9647// Processor
    9748coroutine processorCtx_t {
     
    11667        // RunThread data
    11768        // Action to do after a thread is ran
    118         struct FinishAction finish;
     69        thread_desc * destroyer;
    11970
    12071        // Preemption data
  • libcfa/src/concurrency/kernel_private.hfa

    r50b8885 rb0c7419  
    3434
    3535//Block current thread and release/wake-up the following resources
    36 void LeaveThread();
    37 
    38 bool force_yield( enum __Preemption_Reason );
     36void __leave_thread() __attribute__((noreturn));
    3937
    4038//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/monitor.cfa

    r50b8885 rb0c7419  
    277277                disable_interrupts();
    278278
    279                 thrd->self_cor.state = Halted;
     279                thrd->state = Halted;
    280280
    281281                /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     
    296296                // Unpark the next owner if needed
    297297                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    298                 unpark( new_owner );
    299 
    300                 // Leave the thread, this will unlock the spinlock
    301                 // Use leave thread instead of park which is
    302                 // specialized for this case
    303                 LeaveThread();
     298                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     299                /* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
     300                /* paranoid */ verify( thrd->state == Halted );
     301
     302                kernelTLS.this_processor->destroyer = new_owner;
     303
     304                // Leave the thread
     305                __leave_thread();
    304306
    305307                // Control flow should never reach here!
  • libcfa/src/concurrency/thread.hfa

    r50b8885 rb0c7419  
    101101//----------
    102102// Unpark a thread, if the thread is already blocked, schedule it
    103 //                  if the thread is not yet blocked, signal that it should rerun immediately or reschedule itself
    104 void unpark( thread_desc * this, bool must_yield );
    105 
    106 static inline void unpark( thread_desc * this ) { unpark( this, false ); }
     103//                  if the thread is not yet blocked, signal that it should rerun immediately
     104void unpark( thread_desc * this );
    107105
    108106forall( dtype T | is_thread(T) )
    109 static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ), false );}
    110 
    111 forall( dtype T | is_thread(T) )
    112 static inline void unpark( T & this, bool must_yield ) { if(!&this) return; unpark( get_thread( this ), must_yield );}
     107static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
    113108
    114109//----------
    115110// Yield: force thread to block and be rescheduled
     111bool force_yield( enum __Preemption_Reason );
     112
    116113static inline void yield() {
    117         unpark( active_thread(), true );
    118         park();
     114        force_yield(__MANUAL_PREEMPTION);
    119115}
    120116
Note: See TracChangeset for help on using the changeset viewer.