Timestamp:
Jun 7, 2018, 6:12:11 PM (7 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
Children:
6eb131c, 7b28e4a
Parents:
174845e (diff), 85b1deb (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
src/libcfa/concurrency
Files:
4 edited

  • src/libcfa/concurrency/kernel

    --- r174845e
    +++ rbeefc34c

    @@ -113 +113 @@
             pthread_t kernel_thread;

    +        // RunThread data
    +        // Action to do after a thread is run
    +        struct FinishAction finish;
    +
    +        // Preemption data
    +        // Node which is added in the discrete event simulation
    +        struct alarm_node_t * preemption_alarm;
    +
    +        // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
    +        bool pending_preemption;
    +
    +        // Idle lock
    +        __bin_sem_t idleLock;
    +
             // Termination
             // Set to true to notify the processor that it should terminate
    @@ -120 +134 @@
             semaphore terminated;

    -        // RunThread data
    -        // Action to do after a thread is run
    -        struct FinishAction finish;
    -
    -        // Preemption data
    -        // Node which is added in the discrete event simulation
    -        struct alarm_node_t * preemption_alarm;
    -
    -        // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
    -        bool pending_preemption;
    -
    -        // Idle lock
    -        sem_t idleLock;
    -
             // Link lists fields
    -        struct {
    +        struct __dbg_node_proc {
                     struct processor * next;
                     struct processor * prev;
    @@ -182 +182 @@

             // Link lists fields
    -        struct {
    +        struct __dbg_node_cltr {
                     cluster * next;
                     cluster * prev;
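
    Note on the header change above: the run-thread, preemption, and idle-lock fields move ahead of the
    termination fields, the idle lock switches from a raw POSIX sem_t to the runtime's __bin_sem_t, and the
    previously anonymous link-list structs are given names (__dbg_node_proc, __dbg_node_cltr). The changeset
    does not show how __bin_sem_t is implemented; the sketch below is only a plausible stand-in built on a
    pthread mutex and condition variable, and all names in it are invented for illustration.

    /* Minimal sketch of a binary semaphore in the spirit of the idleLock above;
     * the real CFA __bin_sem_t is not shown in this changeset, so layout and
     * names here are assumptions. */
    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            bool            signalled;   /* at most one pending wake-up */
    } bin_sem_sketch;

    void bin_sem_init(bin_sem_sketch * this) {
            pthread_mutex_init(&this->lock, NULL);
            pthread_cond_init (&this->cond, NULL);
            this->signalled = false;
    }

    void bin_sem_wait(bin_sem_sketch * this) {
            pthread_mutex_lock(&this->lock);
            while (!this->signalled)              /* guard against spurious wake-ups */
                    pthread_cond_wait(&this->cond, &this->lock);
            this->signalled = false;              /* consume the single token */
            pthread_mutex_unlock(&this->lock);
    }

    void bin_sem_post(bin_sem_sketch * this) {
            pthread_mutex_lock(&this->lock);
            this->signalled = true;               /* repeated posts collapse into one */
            pthread_cond_signal(&this->cond);
            pthread_mutex_unlock(&this->lock);
    }
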
  • src/libcfa/concurrency/kernel.c

    --- r174845e
    +++ rbeefc34c

    @@ -16 +16 @@
     //C Includes
     #include <stddef.h>
    +#include <errno.h>
    +#include <string.h>
     extern "C" {
     #include <stdio.h>
    @@ -49 +51 @@
     thread_desc * mainThread;

    -struct { __dllist_t(cluster) list; __spinlock_t lock; } global_clusters;
    +extern "C" {
    +struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
    +}

     //-----------------------------------------------------------------------------
    @@ -143 +147 @@
             runner.proc = &this;

    -        sem_init(&idleLock, 0, 0);
    +        idleLock{};

             start( &this );
    @@ -149 +153 @@

     void ^?{}(processor & this) with( this ){
    -        if( ! do_terminate ) {
    +        if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
                     __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
    -                terminate(&this);
    -                verify(this.do_terminate);
    -                verify( kernelTLS.this_processor != &this);
    +
    +                __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
    +                wake( &this );
    +
                     P( terminated );
                     verify( kernelTLS.this_processor != &this);
    -                pthread_join( kernel_thread, NULL );
    -        }
    -
    -        sem_destroy(&idleLock);
    +        }
    +
    +        pthread_join( kernel_thread, NULL );
     }

    @@ -198 +202 @@

                     thread_desc * readyThread = NULL;
    -                for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
    +                for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
                     {
                             readyThread = nextThread( this->cltr );
    @@ -217 +221 @@
                             else
                             {
    -                                spin(this, &spin_count);
    +                                // spin(this, &spin_count);
    +                                halt(this);
                             }
                     }
    @@ -290 +295 @@
     }

    -// Handles spinning logic
    -// TODO : find some strategy to put cores to sleep after some time
    -void spin(processor * this, unsigned int * spin_count) {
    -        // (*spin_count)++;
    -        halt(this);
    -}
    -
     // KERNEL_ONLY
     // Context invoker for processors
    @@ -403 +401 @@
                     unlock( ready_queue_lock );

    -                if( was_empty ) {
    +                if(was_empty) {
                             lock      (proc_list_lock __cfaabi_dbg_ctx2);
                             if(idles) {
    -                                wake(idles.head);
    +                                wake_fast(idles.head);
                             }
                             unlock    (proc_list_lock);
                     }
    +                else if( struct processor * idle = idles.head ) {
    +                        wake_fast(idle);
    +                }
    +
             }

    @@ -544 +546 @@
             __cfaabi_dbg_print_safe("Kernel : Starting\n");

    -        global_clusters.list{ __get };
    -        global_clusters.lock{};
    +        __cfa_dbg_global_clusters.list{ __get };
    +        __cfa_dbg_global_clusters.lock{};

             // Initialize the main cluster
    @@ -626 +628 @@
             // When its coroutine terminates, it returns control to the mainThread
             // which is currently here
    -        mainProcessor->do_terminate = true;
    +        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
             returnToKernel();
    +        mainThread->self_cor.state = Halted;

             // THE SYSTEM IS NOW COMPLETELY STOPPED
    @@ -643 +646 @@
             ^(mainThread){};

    -        ^(global_clusters.list){};
    -        ^(global_clusters.lock){};
    +        ^(__cfa_dbg_global_clusters.list){};
    +        ^(__cfa_dbg_global_clusters.lock){};

             __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
    @@ -654 +657 @@

     void halt(processor * this) with( *this ) {
    +        // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
    +
             with( *cltr ) {
                     lock      (proc_list_lock __cfaabi_dbg_ctx2);
    @@ -663 +668 @@
             __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);

    -        verify( ({int sval = 0; sem_getvalue(&this->idleLock, &sval); sval; }) < 200);
    -        int __attribute__((unused)) ret = sem_wait(&idleLock);
    -        verify(ret > 0 || errno == EINTR);
    +        wait( idleLock );

             __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
    @@ -675 +678 @@
                     unlock    (proc_list_lock);
             }
    -}
    -
    -void wake(processor * this) {
    -        __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);
    -        int __attribute__((unused)) ret = sem_post(&this->idleLock);
    -        verify(ret > 0 || errno == EINTR);
    -        verify( ({int sval = 0; sem_getvalue(&this->idleLock, &sval); sval; }) < 200);
     }

    @@ -797 +793 @@
     // Global Queues
     void doregister( cluster     & cltr ) {
    -        lock      ( global_clusters.lock __cfaabi_dbg_ctx2);
    -        push_front( global_clusters.list, cltr );
    -        unlock    ( global_clusters.lock );
    +        lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
    +        push_front( __cfa_dbg_global_clusters.list, cltr );
    +        unlock    ( __cfa_dbg_global_clusters.lock );
     }

     void unregister( cluster     & cltr ) {
    -        lock  ( global_clusters.lock __cfaabi_dbg_ctx2);
    -        remove( global_clusters.list, cltr );
    -        unlock( global_clusters.lock );
    +        lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
    +        remove( __cfa_dbg_global_clusters.list, cltr );
    +        unlock( __cfa_dbg_global_clusters.lock );
     }

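    Note on the kernel.c changes above: the old terminate()/sem_* shutdown path is replaced by an explicit
    handshake. The processor destructor publishes do_terminate with an atomic store, wakes the idle
    processor, waits on the terminated semaphore, and now joins the underlying pthread unconditionally;
    the run loop re-reads do_terminate with an atomic load, and halt() simply waits on idleLock. The sketch
    below restates that handshake in plain C, using POSIX semaphores and C11 atomics as stand-ins for the
    CFA types; the names proc_sketch, run, proc_start, and proc_stop are invented for illustration.

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
            pthread_t   kernel_thread;
            atomic_bool do_terminate;
            sem_t       idle;        /* stands in for idleLock            */
            sem_t       terminated;  /* signalled once the run loop exits */
    } proc_sketch;

    static void * run(void * arg) {
            proc_sketch * this = arg;
            while (!atomic_load_explicit(&this->do_terminate, memory_order_seq_cst)) {
                    /* no work in this sketch: park until someone wakes us */
                    sem_wait(&this->idle);
            }
            sem_post(&this->terminated);            /* let the destructor continue */
            return NULL;
    }

    static void proc_start(proc_sketch * this) {
            atomic_init(&this->do_terminate, false);
            sem_init(&this->idle, 0, 0);
            sem_init(&this->terminated, 0, 0);
            pthread_create(&this->kernel_thread, NULL, run, this);
    }

    static void proc_stop(proc_sketch * this) {
            if (!atomic_load_explicit(&this->do_terminate, memory_order_acquire)) {
                    atomic_store_explicit(&this->do_terminate, true, memory_order_relaxed);
                    sem_post(&this->idle);          /* wake the worker so it re-reads the flag */
                    sem_wait(&this->terminated);    /* wait for the run loop to finish         */
            }
            pthread_join(this->kernel_thread, NULL);  /* join unconditionally, as in the diff  */
    }

    int main(void) {
            proc_sketch p;
            proc_start(&p);
            proc_stop(&p);   /* immediate shutdown: the handshake still completes */
            return 0;
    }
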
  • src/libcfa/concurrency/kernel_private.h

    --- r174845e
    +++ rbeefc34c

    @@ -58 +58 @@
     void finishRunning(processor * this);
     void halt(processor * this);
    -void wake(processor * this);
    -void terminate(processor * this);
    -void spin(processor * this, unsigned int * spin_count);
    +
    +static inline void wake_fast(processor * this) {
    +        __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);
    +        post( this->idleLock );
    +}
    +
    +static inline void wake(processor * this) {
    +        disable_interrupts();
    +        wake_fast(this);
    +        enable_interrupts( __cfaabi_dbg_ctx );
    +}

     struct event_kernel_t {
    @@ -68 +76 @@

     extern event_kernel_t * event_kernel;
    -
    -//extern thread_local coroutine_desc * volatile this_coroutine;
    -//extern thread_local thread_desc *    volatile this_thread;
    -//extern thread_local processor *      volatile this_processor;
    -
    -// extern volatile thread_local bool preemption_in_progress;
    -// extern volatile thread_local bool preemption_enabled;
    -// extern volatile thread_local unsigned short disable_preempt_count;

     struct __cfa_kernel_preemption_state_t {
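    Note on the kernel_private.h changes above: the wake/terminate/spin declarations give way to two inline
    wake paths. wake_fast posts the idle lock directly and is intended for callers that already run
    non-preemptibly (kernel-side code), while wake brackets the post with disable_interrupts and
    enable_interrupts. The sketch below only illustrates that bracketing pattern in plain C; the real CFA
    disable_interrupts keeps a per-thread count rather than masking signals, so blocking SIGUSR1 (the
    signal preemption.c uses for preemption) is an approximation, and the _sketch names are invented.

    #include <pthread.h>
    #include <signal.h>
    #include <semaphore.h>

    /* Fast path: just post the idle semaphore; assumed to be called only from
     * code that already runs with preemption disabled. */
    void wake_fast_sketch(sem_t * idle) {
            sem_post(idle);
    }

    /* General path: keep the preemption signal from being delivered around the post. */
    void wake_sketch(sem_t * idle) {
            sigset_t block, old;
            sigemptyset(&block);
            sigaddset(&block, SIGUSR1);                  /* SIGUSR1 drives CFA preemption */
            pthread_sigmask(SIG_BLOCK, &block, &old);    /* plays the role of disable_interrupts */
            wake_fast_sketch(idle);
            pthread_sigmask(SIG_SETMASK, &old, NULL);    /* plays the role of enable_interrupts  */
    }
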
  • src/libcfa/concurrency/preemption.c

    --- r174845e
    +++ rbeefc34c

    @@ -10 +10 @@
     // Created On       : Mon Jun 5 14:20:42 2017
     // Last Modified By : Peter A. Buhr
    -// Last Modified On : Mon Apr  9 13:52:39 2018
    -// Update Count     : 36
    +// Last Modified On : Tue Jun  5 17:35:49 2018
    +// Update Count     : 37
     //

    @@ -116 +116 @@
             // If there are still alarms pending, reset the timer
             if( alarms->head ) {
    -                __cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
    +                __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
                     Duration delta = alarms->head->alarm - currtime;
                     Duration caped = max(delta, 50`us);
    @@ -263 +263 @@
     }

    -// kill wrapper : signal a processor
    -void terminate(processor * this) {
    -        this->do_terminate = true;
    -        wake(this);
    -        sigval_t value = { PREEMPT_TERMINATE };
    -        pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
    -}
    -
     // reserved for future use
     static void timeout( thread_desc * this ) {
    @@ -369 +361 @@
             choose(sfp->si_value.sival_int) {
                     case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    -                case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
    +                case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
                     default:
                             abort( "internal error, signal value is %d", sfp->si_value.sival_int );
    @@ -488 +480 @@
     }

    +#ifdef __CFA_WITH_VERIFY__
    +bool __cfaabi_dbg_in_kernel() {
    +        return !kernelTLS.preemption_state.enabled;
    +}
    +#endif
    +
     // Local Variables: //
     // mode: c //
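
    Note on the preemption.c changes above: besides dropping the terminate() wrapper and making the
    do_terminate check atomic, the debug format specifiers switch from %lu to %ju for the alarm
    timestamps. The sketch below shows the portability point: %lu only matches a 64-bit count where long
    is 64 bits, whereas casting to uintmax_t for %ju (or using the <inttypes.h> macros) is always correct.
    The int64_t field here is an assumption standing in for the .tv member of the CFA time types.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void) {
            int64_t tv = 1528406400000000000LL;            /* a nanosecond timestamp      */
            printf("alarm at %ju ns\n", (uintmax_t)tv);    /* %ju expects uintmax_t       */
            printf("alarm at %" PRId64 " ns\n", tv);       /* PRId64 matches int64_t exactly */
            return 0;
    }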