Changeset 3381ed7


Timestamp:
Feb 13, 2020, 4:18:07 PM (4 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
50b8885
Parents:
9f575ea
Message:

Added park/unpark primitives to threads and removed BlockInternal.
Converted monitors to use park/unpark.
The intrusive queue now marks the next field while a thread is inside a queue.
Added several asserts to the kernel and monitors.
Added a few tests for park and unpark.
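
In a nutshell, the new API pairs park on the blocking side with unpark on the waking side. A minimal sketch of the intent (the Waiter thread here is invented for illustration, not part of the changeset):

        #include <thread.hfa>

        thread Waiter {};
        void main( Waiter & ) {
                park();      // block until some other thread unparks us;
                             // returns immediately if unpark already happened
        }

        int main() {
                Waiter w;    // the thread starts running at its declaration
                unpark( w ); // wake it; safe even if it has not parked yet
        }                    // falling out of scope joins the thread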

Files:
4 added
14 edited

  • libcfa/src/bits/containers.hfa

    r9f575ea r3381ed7  
    146146        static inline forall( dtype T | is_node(T) ) {
    147147                void ?{}( __queue(T) & this ) with( this ) {
    148                         head{ 0p };
     148                        head{ 1p };
    149149                        tail{ &head };
     150                        verify(*tail == 1p);
    150151                }
    151152
    152153                void append( __queue(T) & this, T * val ) with( this ) {
    153154                        verify(tail != 0p);
     155                        verify(*tail == 1p);
    154156                        *tail = val;
    155157                        tail = &get_next( *val );
     158                        *tail = 1p;
    156159                }
    157160
    158161                T * pop_head( __queue(T) & this ) {
     162                        verify(*this.tail == 1p);
    159163                        T * head = this.head;
    160                         if( head ) {
     164                        if( head != 1p ) {
    161165                                this.head = get_next( *head );
    162                                 if( !get_next( *head ) ) {
     166                                if( get_next( *head ) == 1p ) {
    163167                                        this.tail = &this.head;
    164168                                }
    165169                                get_next( *head ) = 0p;
    166                         }
    167                         return head;
     170                                verify(*this.tail == 1p);
     171                                return head;
     172                        }
     173                        verify(*this.tail == 1p);
     174                        return 0p;
    168175                }
    169176
     
    180187                        get_next( *val ) = 0p;
    181188
    182                         verify( (head == 0p) == (&head == tail) );
    183                         verify( *tail == 0p );
     189                        verify( (head == 1p) == (&head == tail) );
     190                        verify( *tail == 1p );
    184191                        return val;
    185192                }
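
    The net effect of this hunk is that the queue's end-of-list marker changes from 0p to 1p, so a node's next field now encodes whether the node is queued at all. A condensed model of the invariant (the in_queue helper is hypothetical, added for illustration):

        // next == 0p : the thread is not in any queue (the field is free to use)
        // next == 1p : the thread is in a queue and is currently the last element
        // otherwise  : the thread is in a queue and next points at its successor
        static inline bool in_queue( thread_desc * thrd ) {  // hypothetical helper
                return thrd->next != 0p;
        }

    This is what lets force_yield in kernel.cfa below test thrd->next == 0p to detect a thread that is already queuing itself.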
  • libcfa/src/bits/locks.hfa

    r9f575ea r3381ed7  
    6060        }
    6161
    62         extern void yield( unsigned int );
    63 
    6462        static inline void ?{}( __spinlock_t & this ) {
    6563                this.lock = 0;
     
    6866        // Lock the spinlock, return false if already acquired
    6967        static inline bool try_lock  ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
     68                disable_interrupts();
    7069                bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
    7170                if( result ) {
    72                         disable_interrupts();
    7371                        __cfaabi_dbg_record( this, caller );
     72                } else {
     73                        enable_interrupts_noPoll();
    7474                }
    7575                return result;
     
    8383                #endif
    8484
     85                disable_interrupts();
    8586                for ( unsigned int i = 1;; i += 1 ) {
    8687                        if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
     
    9899                        #endif
    99100                }
    100                 disable_interrupts();
    101101                __cfaabi_dbg_record( this, caller );
    102102        }
    103103
    104104        static inline void unlock( __spinlock_t & this ) {
     105                __atomic_clear( &this.lock, __ATOMIC_RELEASE );
    105106                enable_interrupts_noPoll();
    106                 __atomic_clear( &this.lock, __ATOMIC_RELEASE );
    107107        }
    108108
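
    Both hunks enforce the same ordering rule: interrupts are disabled before the lock is acquired and re-enabled only after it is released, so a preemption signal can never arrive while a spinlock is held. A condensed paraphrase of the resulting code (not verbatim; Pause stands in for the architecture-specific spin hint):

        static inline void lock( __spinlock_t & this ) {
                disable_interrupts();                           // before acquiring, not after
                while( __atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) ) Pause();
        }

        static inline void unlock( __spinlock_t & this ) {
                __atomic_clear( &this.lock, __ATOMIC_RELEASE ); // release the lock first
                enable_interrupts_noPoll();                     // then allow preemption again
        }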
  • libcfa/src/concurrency/invoke.h

    r9f575ea r3381ed7  
    9393
    9494        enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun, Reschedule };
     95        enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION };
     96        enum __Owner_Reason { __NO_OWNER, __ENTER_FREE, __ENTER_ACCEPT, __ENTER_DTOR_FREE, __ENTER_DTOR_ACCEPT, __ENTER_SIGNAL_BLOCK, __WAITFOR, __LEAVE, __LEAVE_THREAD, __WAIT };
    9597
    9698        struct coroutine_desc {
     
    134136                struct thread_desc * owner;
    135137
     138                enum __Owner_Reason owner_reason;
     139
    136140                // queue of threads that are blocked waiting for the monitor
    137141                __queue_t(struct thread_desc) entry_queue;
     
    165169                // current execution status for coroutine
    166170                volatile int state;
    167                 int preempted;
     171                enum __Preemption_Reason preempted;
    168172
    169173                //SKULLDUGGERY: errno is not saved in the thread data structure because returnToKernel appears to be the only function that requires saving and restoring it
  • libcfa/src/concurrency/kernel.cfa

    r9f575ea r3381ed7  
    258258// Kernel Scheduling logic
    259259//=============================================================================================
     260static thread_desc * nextThread(cluster * this);
    260261static void runThread(processor * this, thread_desc * dst);
    261 static void finishRunning(processor * this);
    262262static void halt(processor * this);
    263263
     
    286286
    287287                        if(readyThread) {
    288                                 verify( ! kernelTLS.preemption_state.enabled );
     288                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     289                                /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
     290                                /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );
    289291
    290292                                runThread(this, readyThread);
    291293
    292                                 verify( ! kernelTLS.preemption_state.enabled );
    293 
    294                                 //Some actions need to be taken from the kernel
    295                                 finishRunning(this);
     294                                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    296295
    297296                                spin_count = 0;
    298297                        } else {
    299298                                // spin(this, &spin_count);
    300                                 halt(this);
     299                                // halt(this);
    301300                        }
    302301                }
     
    332331
    333332        // Actually run the thread
    334         RUN:
    335         {
     333        RUNNING:  while(true) {
    336334                if(unlikely(thrd_dst->preempted)) {
    337                         thrd_dst->preempted = false;
     335                        thrd_dst->preempted = __NO_PREEMPTION;
     336                        verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
    338337                } else {
     338                        verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
    339339                        thrd_dst->state = Active;
    340340                }
     341
     342                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    341343
    342344                // set context switch to the thread that the processor is executing
     
    344346                CtxSwitch( &proc_cor->context, &thrd_dst->context );
    345347                // when CtxSwitch returns we are back in the processor coroutine
    346         }
    347 
    348         // We just finished running a thread, there are a few things that could have happened.
    349         // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
    350         // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
    351         // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue anyway
    352         // 4 - Preempted
    353         // In case 1, we may have won a race so we can't write to the state again.
    354         // In case 2, we lost the race so we now own the thread.
    355         // In case 3, we lost the race but can just reschedule the thread.
    356 
    357         if(unlikely(thrd_dst->preempted)) {
    358                 // The thread was preempted, reschedule it and reset the flag
    359                 ScheduleThread( thrd_dst );
    360 
    361                 // Just before returning to the processor, set the processor coroutine to active
    362                 proc_cor->state = Active;
    363                 return;
    364         }
    365 
    366         // set state of processor coroutine to active and the thread to inactive
    367         enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
    368         switch(old_state) {
    369                 case Halted:
    370                         // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
    371                         thrd_dst->state = Halted;
    372                         break;
    373                 case Active:
    374                         // This is case 1, the regular case, nothing more is needed
    375                         break;
    376                 case Rerun:
    377                         // This is case 2, the racy case, someone tried to run this thread before it finished blocking
    378                         // In this case, just run it again.
    379                         goto RUN;
    380                 case Reschedule:
    381                         // This is case 3, someone tried to run this before it finished blocking
    382                         // but it must go through the ready-queue
    383                         thrd_dst->state = Inactive;  /*restore invariant */
     348
     349                /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     350
     351
     352                // We just finished running a thread, there are a few things that could have happened.
     353                // 1 - Regular case : the thread has blocked and no one has scheduled it yet.
     354                // 2 - Racy case    : the thread has blocked but someone has already tried to schedule it.
     355                // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue anyway
     356                // 4 - Preempted
     357                // In case 1, we may have won a race so we can't write to the state again.
     358                // In case 2, we lost the race so we now own the thread.
     359                // In case 3, we lost the race but can just reschedule the thread.
     360
     361                if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
     362                        // The thread was preempted, reschedule it and reset the flag
    384363                        ScheduleThread( thrd_dst );
    385                         break;
    386                 case Inactive:
    387                 case Start:
    388                 case Primed:
    389                 default:
    390                         // This makes no sense, something is wrong abort
    391                         abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
     364                        break RUNNING;
     365                }
     366
     367                // set state of processor coroutine to active and the thread to inactive
     368                static_assert(sizeof(thrd_dst->state) == sizeof(int));
     369                enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
     370                switch(old_state) {
     371                        case Halted:
     372                                // The thread has halted, it should never be scheduled/run again; set it back to Halted and move on
     373                                thrd_dst->state = Halted;
     374                                break RUNNING;
     375                        case Active:
     376                                // This is case 1, the regular case, nothing more is needed
     377                                break RUNNING;
     378                        case Rerun:
     379                                // This is case 2, the racy case, someone tried to run this thread before it finished blocking
     380                                // In this case, just run it again.
     381                                continue RUNNING;
     382                        case Reschedule:
     383                                // This is case 3, someone tried to run this before it finished blocking
     384                                // but it must go through the ready-queue
     385                                thrd_dst->state = Inactive;  /*restore invariant */
     386                                ScheduleThread( thrd_dst );
     387                                break RUNNING;
     388                        default:
     389                                // This makes no sense, something is wrong abort
     390                                // This makes no sense, something is wrong, abort
     391                }
    392392        }
    393393
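
    The new RUNNING loop resolves the block/wake race with a single atomic exchange on the thread's state. A self-contained model of that decision, using plain C11 atomics with names simplified from the diff:

        #include <stdatomic.h>

        enum state { Inactive, Active, Rerun, Reschedule };

        // Kernel side, after a thread context-switches back: atomically claim the state.
        // Whichever side sees the other's write is responsible for the next step.
        void finish_blocking( _Atomic enum state * s ) {
                switch( atomic_exchange( s, Inactive ) ) {
                case Active:     break;  // case 1: thread blocked first; a later unpark reschedules it
                case Rerun:      break;  // case 2: unpark won the race; run the thread again immediately
                case Reschedule: break;  // case 3: unpark(..., true) won; push the thread onto the ready-queue
                default:         break;  // Inactive/Start/Primed here indicates a bug; the kernel aborts
                }
        }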
     
    398398// KERNEL_ONLY
    399399static void returnToKernel() {
    400         verify( ! kernelTLS.preemption_state.enabled );
     400        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    401401        coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    402402        thread_desc * thrd_src = kernelTLS.this_thread;
     
    416416        }
    417417
    418         verify( ! kernelTLS.preemption_state.enabled );
    419 }
    420 
    421 // KERNEL_ONLY
    422 // Once a thread has finished running, some of
    423 // its final actions must be executed from the kernel
    424 static void finishRunning(processor * this) with( this->finish ) {
    425         verify( ! kernelTLS.preemption_state.enabled );
    426         verify( action_code == No_Action );
    427         choose( action_code ) {
    428         case No_Action:
    429                 break;
    430         case Release:
    431                 unlock( *lock );
    432         case Schedule:
    433                 ScheduleThread( thrd );
    434         case Release_Schedule:
    435                 unlock( *lock );
    436                 ScheduleThread( thrd );
    437         case Release_Multi:
    438                 for(int i = 0; i < lock_count; i++) {
    439                         unlock( *locks[i] );
    440                 }
    441         case Release_Multi_Schedule:
    442                 for(int i = 0; i < lock_count; i++) {
    443                         unlock( *locks[i] );
    444                 }
    445                 for(int i = 0; i < thrd_count; i++) {
    446                         ScheduleThread( thrds[i] );
    447                 }
    448         case Callback:
    449                 callback();
    450         default:
    451                 abort("KERNEL ERROR: Unexpected action to run after thread");
    452         }
     418        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    453419}
    454420
     
    581547//-----------------------------------------------------------------------------
    582548// Scheduler routines
    583 
    584549// KERNEL ONLY
    585550void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
    586551        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    587         /* paranoid */ verifyf( thrd->state == Inactive || thrd->state == Start || thrd->preempted, "state : %d, preempted %d\n", thrd->state, thrd->preempted);
     552        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
     553        /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
     554                          "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
     555        /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
     556                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
     557        /* paranoid */ #endif
    588558        /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
    589559
     
    608578
    609579// KERNEL ONLY
    610 thread_desc * nextThread(cluster * this) with( *this ) {
    611         verify( ! kernelTLS.preemption_state.enabled );
     580static thread_desc * nextThread(cluster * this) with( *this ) {
     581        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     582
    612583        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    613584        thread_desc * head = pop_head( ready_queue );
    614585        unlock( ready_queue_lock );
    615         verify( ! kernelTLS.preemption_state.enabled );
     586
     587        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    616588        return head;
    617589}
    618590
    619 void BlockInternal() {
     591void unpark( thread_desc * thrd, bool must_yield ) {
     592        if( !thrd ) return;
     593
     594        enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
     595
    620596        disable_interrupts();
    621         verify( ! kernelTLS.preemption_state.enabled );
     597        static_assert(sizeof(thrd->state) == sizeof(int));
     598        enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
     599        switch(old_state) {
     600                case Active:
     601                        // Wake won the race, the thread will reschedule/rerun itself
     602                        break;
     603                case Inactive:
      604                        /* paranoid */ verify( thrd->preempted == __NO_PREEMPTION );
     605
     606                        // Wake lost the race,
     607                        thrd->state = Inactive;
     608                        ScheduleThread( thrd );
     609                        break;
     610                case Rerun:
     611                case Reschedule:
     612                        abort("More than one thread attempted to schedule thread %p\n", thrd);
     613                        break;
     614                case Halted:
     615                case Start:
     616                case Primed:
     617                default:
      618                        // This makes no sense, something is wrong, abort
     619                        abort();
     620        }
     621        enable_interrupts( __cfaabi_dbg_ctx );
     622}
     623
     624void park( void ) {
     625        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     626        disable_interrupts();
     627        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     628        /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
     629
    622630        returnToKernel();
    623         verify( ! kernelTLS.preemption_state.enabled );
     631
     632        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    624633        enable_interrupts( __cfaabi_dbg_ctx );
    625 }
    626 
    627 void BlockInternal( __spinlock_t * lock ) {
     634        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     635
     636}
     637
     638// KERNEL ONLY
     639void LeaveThread() {
     640        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     641        returnToKernel();
     642}
     643
     644// KERNEL ONLY
     645bool force_yield( __Preemption_Reason reason ) {
     646        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
    628647        disable_interrupts();
    629         unlock( *lock );
    630 
    631         verify( ! kernelTLS.preemption_state.enabled );
    632         returnToKernel();
    633         verify( ! kernelTLS.preemption_state.enabled );
    634 
    635         enable_interrupts( __cfaabi_dbg_ctx );
    636 }
    637 
    638 void BlockInternal( thread_desc * thrd ) {
    639         disable_interrupts();
    640         WakeThread( thrd, false );
    641 
    642         verify( ! kernelTLS.preemption_state.enabled );
    643         returnToKernel();
    644         verify( ! kernelTLS.preemption_state.enabled );
    645 
    646         enable_interrupts( __cfaabi_dbg_ctx );
    647 }
    648 
    649 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
    650         disable_interrupts();
    651         unlock( *lock );
    652         WakeThread( thrd, false );
    653 
    654         verify( ! kernelTLS.preemption_state.enabled );
    655         returnToKernel();
    656         verify( ! kernelTLS.preemption_state.enabled );
    657 
    658         enable_interrupts( __cfaabi_dbg_ctx );
    659 }
    660 
    661 void BlockInternal(__spinlock_t * locks [], unsigned short count) {
    662         disable_interrupts();
    663         for(int i = 0; i < count; i++) {
    664                 unlock( *locks[i] );
    665         }
    666 
    667         verify( ! kernelTLS.preemption_state.enabled );
    668         returnToKernel();
    669         verify( ! kernelTLS.preemption_state.enabled );
    670 
    671         enable_interrupts( __cfaabi_dbg_ctx );
    672 }
    673 
    674 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
    675         disable_interrupts();
    676         for(int i = 0; i < lock_count; i++) {
    677                 unlock( *locks[i] );
    678         }
    679         for(int i = 0; i < thrd_count; i++) {
    680                 WakeThread( thrds[i], false );
    681         }
    682 
    683         verify( ! kernelTLS.preemption_state.enabled );
    684         returnToKernel();
    685         verify( ! kernelTLS.preemption_state.enabled );
    686 
    687         enable_interrupts( __cfaabi_dbg_ctx );
    688 }
    689 
    690 void BlockInternal(__finish_callback_fptr_t callback) {
    691         disable_interrupts();
    692         callback();
    693 
    694         verify( ! kernelTLS.preemption_state.enabled );
    695         returnToKernel();
    696         verify( ! kernelTLS.preemption_state.enabled );
    697 
    698         enable_interrupts( __cfaabi_dbg_ctx );
    699 }
    700 
    701 // KERNEL ONLY
    702 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    703         verify( ! kernelTLS.preemption_state.enabled );
    704         unlock( *lock );
    705         WakeThread( thrd, false );
    706 
    707         returnToKernel();
     648        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     649
     650        thread_desc * thrd = kernelTLS.this_thread;
     651        /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
     652
      653                // SKULLDUGGERY: It is possible that we are preempting this thread just before
      654                // it was going to park itself. If that is the case and it is already using the
      655                // intrusive fields, then we cannot use them to preempt the thread.
      656                // In that case, abandon the preemption.
     657        bool preempted = false;
     658        if(thrd->next == 0p) {
     659                preempted = true;
     660                thrd->preempted = reason;
     661                returnToKernel();
     662        }
     663
     664        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     665        enable_interrupts_noPoll();
     666        /* paranoid */ verify( kernelTLS.preemption_state.enabled );
     667
     668        return preempted;
    708669}
    709670
     
    939900
    940901                // atomically release spin lock and block
    941                 BlockInternal( &lock );
     902                unlock( lock );
     903                park();
    942904        }
    943905        else {
     
    958920
    959921        // make new owner
    960         WakeThread( thrd, false );
     922        unpark( thrd );
    961923}
    962924
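
    The conversions at the bottom of this file show the recurring pattern that replaces every BlockInternal variant: publish the thread, drop the lock, then park. The split is race-free because park consumes a pending unpark. The shape, extracted from the hunks above (a fragment, not a standalone function):

        // blocking side (was BlockInternal( &lock )):
        append( queue, kernelTLS.this_thread );  // make this thread findable while holding the lock
        unlock( lock );                          // from here on another thread may unpark us
        park();                                  // returns immediately if unpark already happened

        // waking side (was WakeThread( thrd, false )):
        unpark( thrd );                          // safe whether thrd has parked yet or not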
  • libcfa/src/concurrency/kernel_private.hfa

    r9f575ea r3381ed7  
    3232
    3333void ScheduleThread( thread_desc * ) __attribute__((nonnull (1)));
    34 static inline void WakeThread( thread_desc * thrd, bool must_yield ) {
    35         if( !thrd ) return;
    36 
    37         enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
    38 
    39         disable_interrupts();
    40         static_assert(sizeof(thrd->state) == sizeof(int));
    41         enum coroutine_state old_state = (enum coroutine_state)__atomic_exchange_n((volatile int *)&thrd->state, (int)new_state, __ATOMIC_SEQ_CST);
    42         switch(old_state) {
    43                 case Active:
    44                         // Wake won the race, the thread will reschedule/rerun itself
    45                         break;
    46                 case Inactive:
    47                         // Wake lost the race,
    48                         thrd->state = Inactive;
    49                         ScheduleThread( thrd );
    50                         break;
    51                 case Rerun:
    52                 case Reschedule:
    53                         abort("More than one thread attempted to schedule thread %p\n", thrd);
    54                         break;
    55                 case Halted:
    56                 case Start:
    57                 case Primed:
    58                 default:
    59                         // This makes no sense, something is wrong abort
    60                         abort();
    61         }
    62         enable_interrupts( __cfaabi_dbg_ctx );
    63 }
    64 thread_desc * nextThread(cluster * this);
    6534
    6635//Block current thread and release/wake-up the following resources
    67 void BlockInternal(void);
    68 void BlockInternal(__spinlock_t * lock);
    69 void BlockInternal(thread_desc * thrd);
    70 void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
    71 void BlockInternal(__spinlock_t * locks [], unsigned short count);
    72 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
    73 void BlockInternal(__finish_callback_fptr_t callback);
    74 void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
     36void LeaveThread();
     37
     38bool force_yield( enum __Preemption_Reason );
    7539
    7640//-----------------------------------------------------------------------------
  • libcfa/src/concurrency/monitor.cfa

    r9f575ea r3381ed7  
    2727//-----------------------------------------------------------------------------
    2828// Forward declarations
    29 static inline void set_owner ( monitor_desc * this, thread_desc * owner );
    30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
     29static inline void set_owner ( monitor_desc * this, thread_desc * owner, enum __Owner_Reason );
     30static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner, enum __Owner_Reason );
    3131static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    3232static inline void reset_mask( monitor_desc * this );
    3333
    34 static inline thread_desc * next_thread( monitor_desc * this );
     34static inline thread_desc * next_thread( monitor_desc * this, enum __Owner_Reason );
    3535static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
    3636
     
    9494                if( !this->owner ) {
    9595                        // No one has the monitor, just take it
    96                         set_owner( this, thrd );
     96                        set_owner( this, thrd, __ENTER_FREE );
    9797
    9898                        __cfaabi_dbg_print_safe( "Kernel :  mon is free \n" );
     
    106106                else if( is_accepted( this, group) ) {
    107107                        // Some one was waiting for us, enter
    108                         set_owner( this, thrd );
     108                        set_owner( this, thrd, __ENTER_ACCEPT );
    109109
    110110                        // Reset mask
     
    117117
    118118                        // Some one else has the monitor, wait in line for it
     119                        /* paranoid */ verify( thrd->next == 0p );
    119120                        append( this->entry_queue, thrd );
    120 
    121                         BlockInternal( &this->lock );
     121                        /* paranoid */ verify( thrd->next == 1p );
     122
     123                        unlock( this->lock );
     124                        park();
    122125
    123126                        __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
    124127
    125                         // BlockInternal will unlock spinlock, no need to unlock ourselves
     128                        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    126129                        return;
    127130                }
    128131
    129132                __cfaabi_dbg_print_safe( "Kernel : %10p Entered  mon %p\n", thrd, this);
     133
     134                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     135                /* paranoid */ verify( this->lock.lock );
    130136
    131137                // Release the lock and leave
     
    147153
    148154                        // No one has the monitor, just take it
    149                         set_owner( this, thrd );
     155                        set_owner( this, thrd, __ENTER_DTOR_FREE );
     156
     157                        verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    150158
    151159                        unlock( this->lock );
     
    166174                        // Wake the thread that is waiting for this
    167175                        __condition_criterion_t * urgent = pop( this->signal_stack );
    168                         verify( urgent );
     176                        /* paranoid */ verify( urgent );
    169177
    170178                        // Reset mask
     
    175183
    176184                        // Some one else has the monitor, wait for him to finish and then run
    177                         BlockInternal( &this->lock, urgent->owner->waiting_thread );
     185                        unlock( this->lock );
     186
     187                        // Release the next thread
     188                        /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     189                        unpark( urgent->owner->waiting_thread );
     190
     191                        // Park current thread waiting
     192                        park();
    178193
    179194                        // Some one was waiting for us, enter
    180                         set_owner( this, thrd );
     195                        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    181196                }
    182197                else {
     
    187202
    188203                        // Some one else has the monitor, wait in line for it
     204                        /* paranoid */ verify( thrd->next == 0p );
    189205                        append( this->entry_queue, thrd );
    190                         BlockInternal( &this->lock );
    191 
    192                         // BlockInternal will unlock spinlock, no need to unlock ourselves
     206                        /* paranoid */ verify( thrd->next == 1p );
     207                        unlock( this->lock );
     208
     209                        // Park current thread waiting
     210                        park();
     211
     212                        /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    193213                        return;
    194214                }
     
    205225                __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
    206226
    207                 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     227                /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    208228
    209229                // Leaving a recursion level, decrement the counter
     
    219239
    220240                // Get the next thread, will be null on low contention monitor
    221                 thread_desc * new_owner = next_thread( this );
     241                thread_desc * new_owner = next_thread( this, __LEAVE );
     242
      243                // Check that the new owner is consistent with whom we wake up.
      244                // new_owner might be null even if someone owns the monitor, when that owner is still waiting for another monitor
     245                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
    222246
    223247                // We can now let other threads in safely
     
    225249
    226250                //We need to wake-up the thread
    227                 WakeThread( new_owner, false );
     251                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     252                unpark( new_owner );
    228253        }
    229254
     
    254279                thrd->self_cor.state = Halted;
    255280
    256                 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
     281                /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
    257282
    258283                // Leaving a recursion level, decrement the counter
     
    264289
    265290                // Fetch the next thread, can be null
    266                 thread_desc * new_owner = next_thread( this );
     291                thread_desc * new_owner = next_thread( this, __LEAVE_THREAD );
     292
     293                // Release the monitor lock
     294                unlock( this->lock );
     295
     296                // Unpark the next owner if needed
     297                /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
     298                unpark( new_owner );
    267299
    268300                // Leave the thread, this will unlock the spinlock
    269                 // Use leave thread instead of BlockInternal which is
    270                 // specialized for this case and supports null new_owner
    271                 LeaveThread( &this->lock, new_owner );
     301                // Use leave thread instead of park which is
     302                // specialized for this case
     303                LeaveThread();
    272304
    273305                // Control flow should never reach here!
     
    400432        // Append the current wait operation to the ones already queued on the condition
    401433        // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
     434        /* paranoid */ verify( waiter.next == 0p );
    402435        append( this.blocked, &waiter );
     436        /* paranoid */ verify( waiter.next == 1p );
    403437
    404438        // Lock all monitors (aggregates the locks as well)
     
    415449        // Remove any duplicate threads
    416450        for( __lock_size_t i = 0; i < count; i++) {
    417                 thread_desc * new_owner = next_thread( monitors[i] );
     451                thread_desc * new_owner = next_thread( monitors[i], __WAIT );
    418452                insert_unique( threads, thread_count, new_owner );
    419453        }
    420454
     455        // Unlock the locks, we don't need them anymore
     456        for(int i = 0; i < count; i++) {
     457                unlock( *locks[i] );
     458        }
     459
     460        // Wake the threads
     461        for(int i = 0; i < thread_count; i++) {
     462                unpark( threads[i] );
     463        }
     464
    421465        // Everything is ready to go to sleep
    422         BlockInternal( locks, count, threads, thread_count );
     466        park();
    423467
    424468        // We are back, restore the owners and recursions
     
    490534        //Find the thread to run
    491535        thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
    492         set_owner( monitors, count, signallee );
     536        /* paranoid */ verify( signallee->next == 0p );
     537        set_owner( monitors, count, signallee, __ENTER_SIGNAL_BLOCK );
    493538
    494539        __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );
    495540
     541        // unlock all the monitors
     542        unlock_all( locks, count );
     543
     544        // unpark the thread we signalled
     545        unpark( signallee );
     546
    496547        //Everything is ready to go to sleep
    497         BlockInternal( locks, count, &signallee, 1 );
     548        park();
    498549
    499550
     
    590641
    591642                                // Set the owners to be the next thread
    592                                 set_owner( monitors, count, next );
    593 
    594                                 // Everything is ready to go to sleep
    595                                 BlockInternal( locks, count, &next, 1 );
     643                                set_owner( monitors, count, next, __WAITFOR );
     644
     645                                // unlock all the monitors
     646                                unlock_all( locks, count );
     647
     648                                // unpark the thread we signalled
     649                                unpark( next );
     650
     651                                //Everything is ready to go to sleep
     652                                park();
    596653
    597654                                // We are back, restore the owners and recursions
     
    631688        }
    632689
     690        // unlock all the monitors
     691        unlock_all( locks, count );
     692
    633693        //Everything is ready to go to sleep
    634         BlockInternal( locks, count );
     694        park();
    635695
    636696
     
    649709// Utilities
    650710
    651 static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
    652         // __cfaabi_dbg_print_safe( "Kernal :   Setting owner of %p to %p ( was %p)\n", this, owner, this->owner );
     711static inline void set_owner( monitor_desc * this, thread_desc * owner, enum __Owner_Reason reason ) {
     712        /* paranoid */ verify( this->lock.lock );
    653713
    654714        //Pass the monitor appropriately
    655715        this->owner = owner;
     716        this->owner_reason = reason;
    656717
    657718        //We are passing the monitor to someone else, which means recursion level is not 0
     
    659720}
    660721
    661 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
    662         monitors[0]->owner     = owner;
    663         monitors[0]->recursion = 1;
     722static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner, enum __Owner_Reason reason ) {
     723        /* paranoid */ verify ( monitors[0]->lock.lock );
     724        /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     725        monitors[0]->owner        = owner;
     726        monitors[0]->owner_reason = reason;
     727        monitors[0]->recursion    = 1;
    664728        for( __lock_size_t i = 1; i < count; i++ ) {
    665                 monitors[i]->owner     = owner;
    666                 monitors[i]->recursion = 0;
     729                /* paranoid */ verify ( monitors[i]->lock.lock );
     730                /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
     731                monitors[i]->owner        = owner;
     732                monitors[i]->owner_reason = reason;
     733                monitors[i]->recursion    = 0;
    667734        }
    668735}
     
    680747}
    681748
    682 static inline thread_desc * next_thread( monitor_desc * this ) {
     749static inline thread_desc * next_thread( monitor_desc * this, enum __Owner_Reason reason ) {
    683750        //Check the signaller stack
    684751        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
    688755                //regardless of if we are ready to baton pass,
    689756                //we need to set the monitor as in use
    690                 set_owner( this,  urgent->owner->waiting_thread );
     757                /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     758                set_owner( this,  urgent->owner->waiting_thread, reason );
    691759
    692760                return check_condition( urgent );
     
    696764        // Get the next thread in the entry_queue
    697765        thread_desc * new_owner = pop_head( this->entry_queue );
    698         set_owner( this, new_owner );
     766        /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
     767        /* paranoid */ verify( !new_owner || new_owner->next == 0p );
     768        set_owner( this, new_owner, reason );
    699769
    700770        return new_owner;
     
    841911        // For each thread in the entry-queue
    842912        for(    thread_desc ** thrd_it = &entry_queue.head;
    843                 *thrd_it;
     913                *thrd_it != 1p;
    844914                thrd_it = &(*thrd_it)->next
    845915        ) {
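
    One ordering constraint runs through every converted path in this file: park no longer releases anything on the caller's behalf, so all hand-off work (ownership transfer, unlocking, unparking the target) must complete before the call to park. Condensed from the signal_block hunk above:

        set_owner( monitors, count, signallee, __ENTER_SIGNAL_BLOCK ); // hand over ownership first
        unlock_all( locks, count );  // release every monitor lock
        unpark( signallee );         // wake the signalled thread
        park();                      // only now block; we resume once the signallee is done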
  • libcfa/src/concurrency/monitor.hfa

    r9f575ea r3381ed7  
    3232        signal_stack{};
    3333        owner         = 0p;
     34        owner_reason  = __NO_OWNER;
    3435        recursion     = 0;
    3536        mask.accepted = 0p;
     
    133134              bool signal      ( condition & this );
    134135              bool signal_block( condition & this );
    135 static inline bool is_empty    ( condition & this ) { return !this.blocked.head; }
     136static inline bool is_empty    ( condition & this ) { return this.blocked.head == 1p; }
    136137         uintptr_t front       ( condition & this );
    137138
  • libcfa/src/concurrency/mutex.cfa

    r9f575ea r3381ed7  
    4040        if( is_locked ) {
    4141                append( blocked_threads, kernelTLS.this_thread );
    42                 BlockInternal( &lock );
     42                unlock( lock );
     43                park();
    4344        }
    4445        else {
     
    6263        lock( this.lock __cfaabi_dbg_ctx2 );
    6364        this.is_locked = (this.blocked_threads != 0);
    64         WakeThread(
    65                 pop_head( this.blocked_threads ), false
     65        unpark(
     66                pop_head( this.blocked_threads )
    6667        );
    6768        unlock( this.lock );
     
    9495        else {
    9596                append( blocked_threads, kernelTLS.this_thread );
    96                 BlockInternal( &lock );
     97                unlock( lock );
     98                park();
    9799        }
    98100}
     
    121123                owner = thrd;
    122124                recursion_count = (thrd ? 1 : 0);
    123                 WakeThread( thrd, false );
     125                unpark( thrd );
    124126        }
    125127        unlock( lock );
     
    138140void notify_one(condition_variable & this) with(this) {
    139141        lock( lock __cfaabi_dbg_ctx2 );
    140         WakeThread(
    141                 pop_head( this.blocked_threads ), false
     142        unpark(
     143                pop_head( this.blocked_threads )
    142144        );
    143145        unlock( lock );
     
    147149        lock( lock __cfaabi_dbg_ctx2 );
    148150        while(this.blocked_threads) {
    149                 WakeThread(
    150                         pop_head( this.blocked_threads ), false
     151                unpark(
     152                        pop_head( this.blocked_threads )
    151153                );
    152154        }
     
    157159        lock( this.lock __cfaabi_dbg_ctx2 );
    158160        append( this.blocked_threads, kernelTLS.this_thread );
    159         BlockInternal( &this.lock );
     161        unlock( this.lock );
     162        park();
    160163}
    161164
     
    164167        lock( this.lock __cfaabi_dbg_ctx2 );
    165168        append( this.blocked_threads, kernelTLS.this_thread );
    166         void __unlock(void) {
    167                 unlock(l);
    168                 unlock(this.lock);
    169         }
    170         BlockInternal( __unlock );
     169        unlock(l);
     170        unlock(this.lock);
     171        park();
    171172        lock(l);
    172173}
  • libcfa/src/concurrency/preemption.cfa

    r9f575ea r3381ed7  
    187187        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    188188                processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    189                 thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic store
    190189
    191190                with( kernelTLS.preemption_state ){
     
    209208                                if( proc->pending_preemption ) {
    210209                                        proc->pending_preemption = false;
    211                                         BlockInternal( thrd );
     210                                        force_yield( __POLL_PREEMPTION );
    212211                                }
    213212                        }
     
    394393        // Preemption can occur here
    395394
    396         kernelTLS.this_thread->preempted = true;
    397         BlockInternal(); // Do the actual CtxSwitch
     395        force_yield( __ALARM_PREEMPTION ); // Do the actual CtxSwitch
    398396}
    399397
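
    Preemption now funnels through force_yield with an explicit reason instead of setting a bare flag and calling BlockInternal. The key guard, condensed from force_yield in kernel.cfa above, is that the intrusive next field doubles as a "mid-park" detector:

        // inside force_yield( reason ), with interrupts already disabled:
        if( thrd->next == 0p ) {          // link free: the thread is not queuing itself
                thrd->preempted = reason; // __ALARM_PREEMPTION or __POLL_PREEMPTION
                returnToKernel();         // context-switch out; runThread sees the preemption
        }                                 // otherwise the thread is about to park: abandon the yield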
  • libcfa/src/concurrency/thread.cfa

    r9f575ea r3381ed7  
    3636        self_cor{ name, storage, storageSize };
    3737        state = Start;
    38         preempted = false;
     38        preempted = __NO_PREEMPTION;
    3939        curr_cor = &self_cor;
    4040        self_mon.owner = &this;
     
    7878void __thrd_start( T & this, void (*main_p)(T &) ) {
    7979        thread_desc * this_thrd = get_thread(this);
    80         thread_desc * curr_thrd = TL_GET( this_thread );
    8180
    8281        disable_interrupts();
     
    8584        this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
    8685        verify( this_thrd->context.SP );
    87         // CtxSwitch( &curr_thrd->context, &this_thrd->context );
    8886
    8987        ScheduleThread(this_thrd);
    9088        enable_interrupts( __cfaabi_dbg_ctx );
    91 }
    92 
    93 void yield( void ) {
    94         // Safety note : This could cause some false positives due to preemption
    95       verify( TL_GET( preemption_state.enabled ) );
    96         BlockInternal( TL_GET( this_thread ) );
    97         // Safety note : This could cause some false positives due to preemption
    98       verify( TL_GET( preemption_state.enabled ) );
    99 }
    100 
    101 void yield( unsigned times ) {
    102         for( unsigned i = 0; i < times; i++ ) {
    103                 yield();
    104         }
    10589}
    10690
  • libcfa/src/concurrency/thread.hfa

    r9f575ea r3381ed7  
    8888void ^?{}( scoped(T)& this );
    8989
    90 void yield();
    91 void yield( unsigned times );
     90//-----------------------------------------------------------------------------
     91// Thread getters
     92static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
    9293
    93 static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
     94//-----------------------------------------------------------------------------
     95// Scheduler API
     96
     97//----------
      98// Park thread: block until the corresponding call to unpark; does not block if unpark has already been called
     99void park( void );
     100
     101//----------
      102// Unpark a thread: if the thread is already blocked, schedule it;
      103//                  if the thread is not yet blocked, signal that it should rerun immediately or reschedule itself
     104void unpark( thread_desc * this, bool must_yield );
     105
     106static inline void unpark( thread_desc * this ) { unpark( this, false ); }
     107
     108forall( dtype T | is_thread(T) )
     109static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ), false );}
     110
     111forall( dtype T | is_thread(T) )
     112static inline void unpark( T & this, bool must_yield ) { if(!&this) return; unpark( get_thread( this ), must_yield );}
     113
     114//----------
     115// Yield: force thread to block and be rescheduled
     116static inline void yield() {
     117        unpark( active_thread(), true );
     118        park();
     119}
     120
     121// Yield: yield N times
     122static inline void yield( unsigned times ) {
     123        for( times ) {
     124                yield();
     125        }
     126}
    94127
    95128// Local Variables: //
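
    Since yield is now just unpark( active_thread(), true ) followed by park, a yielding thread marks itself Reschedule and then blocks, so the kernel pushes it through the ready-queue instead of rerunning it in place. A minimal use of the new API (the Worker thread is invented for illustration):

        #include <thread.hfa>

        thread Worker {};
        void main( Worker & ) {
                for( 10 ) yield();  // give up the processor ten times, going through the ready-queue each time
        }

        int main() {
                Worker w;           // runs concurrently with main
        }                           // falling out of scope joins the thread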
  • tests/concurrent/examples/.expect/datingService.txt

    r9f575ea r3381ed7  
    1 Girl:17 is dating Boy at 2 with ccode 17
    2  Boy:2 is dating Girl 17 with ccode 17
    3  Boy:14 is dating Girl 5 with ccode 5
    4 Girl:5 is dating Boy at 14 with ccode 5
    5  Boy:9 is dating Girl 10 with ccode 10
    6 Girl:10 is dating Boy at 9 with ccode 10
    7  Boy:1 is dating Girl 18 with ccode 18
    8 Girl:18 is dating Boy at 1 with ccode 18
    9  Boy:16 is dating Girl 3 with ccode 3
    10 Girl:3 is dating Boy at 16 with ccode 3
    11  Boy:5 is dating Girl 14 with ccode 14
    12 Girl:14 is dating Boy at 5 with ccode 14
    13  Boy:15 is dating Girl 4 with ccode 4
    14 Girl:4 is dating Boy at 15 with ccode 4
    15 Girl:0 is dating Boy at 19 with ccode 0
    16  Boy:19 is dating Girl 0 with ccode 0
    17 Girl:9 is dating Boy at 10 with ccode 9
    18  Boy:10 is dating Girl 9 with ccode 9
    19 Girl:11 is dating Boy at 8 with ccode 11
    20  Boy:8 is dating Girl 11 with ccode 11
    21  Boy:12 is dating Girl 7 with ccode 7
    22 Girl:7 is dating Boy at 12 with ccode 7
    23  Boy:11 is dating Girl 8 with ccode 8
    24 Girl:8 is dating Boy at 11 with ccode 8
    25 Girl:16 is dating Boy at 3 with ccode 16
    26  Boy:3 is dating Girl 16 with ccode 16
    27 Girl:15 is dating Boy at 4 with ccode 15
    28  Boy:4 is dating Girl 15 with ccode 15
    29 Girl:19 is dating Boy at 0 with ccode 19
    30  Boy:0 is dating Girl 19 with ccode 19
    31 Girl:2 is dating Boy at 17 with ccode 2
    32  Boy:17 is dating Girl 2 with ccode 2
    33  Boy:13 is dating Girl 6 with ccode 6
    34 Girl:6 is dating Boy at 13 with ccode 6
    35  Boy:7 is dating Girl 12 with ccode 12
    36 Girl:12 is dating Boy at 7 with ccode 12
    37 Girl:13 is dating Boy at 6 with ccode 13
    38  Boy:6 is dating Girl 13 with ccode 13
    39 Girl:1 is dating Boy at 18 with ccode 1
    40  Boy:18 is dating Girl 1 with ccode 1
  • tests/concurrent/examples/datingService.cfa

    r9f575ea r3381ed7  
    11//
    22// Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
    3 // 
     3//
    44// The contents of this file are covered under the licence agreement in the
    55// file "LICENCE" distributed with Cforall.
     
    3535                signal_block( Boys[ccode] );                                    // restart boy to set phone number
    3636        } // if
    37         sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
     37        //sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
    3838        return BoyPhoneNo;
    3939} // DatingService girl
     
    4747                signal_block( Girls[ccode] );                                   // restart girl to set phone number
    4848        } // if
    49         sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
     49        //sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
    5050        return GirlPhoneNo;
    5151} // DatingService boy
  • tests/concurrent/multi-monitor.cfa

    r9f575ea r3381ed7  
    1111
    1212void increment( monitor_t & mutex p1, monitor_t & mutex p2, int & value ) {
     13        assert(active_thread() == get_monitor(p1)->owner);
     14        assert(active_thread() == get_monitor(p2)->owner);
    1315        value += 1;
     16        assert(active_thread() == get_monitor(p1)->owner);
     17        assert(active_thread() == get_monitor(p2)->owner);
    1418}
    1519