Changeset 21a5dde1 for src/libcfa


Timestamp: Jul 20, 2017, 11:33:59 PM (7 years ago)
Author: Peter A. Buhr <pabuhr@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: 6d54c3a
Parents: dab7ac7 (diff), e1e4aa9 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message: Merge branch 'master' of plg2:software/cfa/cfa-cc
Location: src/libcfa/concurrency
Files: 10 edited

  • src/libcfa/concurrency/alarm.c

    rdab7ac7 r21a5dde1  
    3131
    3232//=============================================================================================
     33// time type
     34//=============================================================================================
     35
     36#define one_second         1_000_000_000ul
     37#define one_milisecond         1_000_000ul
     38#define one_microsecond            1_000ul
     39#define one_nanosecond                 1ul
     40
     41__cfa_time_t zero_time = { 0 };
     42
     43void ?{}( __cfa_time_t * this ) { this->val = 0; }
     44void ?{}( __cfa_time_t * this, zero_t zero ) { this->val = 0; }
     45
     46void ?{}( itimerval * this, __cfa_time_t * alarm ) {
     47        this->it_value.tv_sec = alarm->val / one_second;                        // seconds
     48        this->it_value.tv_usec = max( (alarm->val % one_second) / one_microsecond, 1000 ); // microseconds
     49        this->it_interval.tv_sec = 0;
     50        this->it_interval.tv_usec = 0;
     51}
     52
     53
     54void ?{}( __cfa_time_t * this, timespec * curr ) {
     55        uint64_t secs  = curr->tv_sec;
     56        uint64_t nsecs = curr->tv_nsec;
     57        this->val = (secs * one_second) + nsecs;
     58}
     59
     60__cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs ) {
     61        this->val = 0;
     62        return *this;
     63}
     64
     65__cfa_time_t from_s ( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000_000ul; return ret; }
     66__cfa_time_t from_ms( uint64_t val ) { __cfa_time_t ret; ret.val = val *     1_000_000ul; return ret; }
     67__cfa_time_t from_us( uint64_t val ) { __cfa_time_t ret; ret.val = val *         1_000ul; return ret; }
     68__cfa_time_t from_ns( uint64_t val ) { __cfa_time_t ret; ret.val = val *             1ul; return ret; }
     69
     70//=============================================================================================
    3371// Clock logic
    3472//=============================================================================================
     
    3775        timespec curr;
    3876        clock_gettime( CLOCK_REALTIME, &curr );
    39         __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
    40         // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
    41         return curr_time;
     77        return (__cfa_time_t){ &curr };
    4278}
    4379
    4480void __kernel_set_timer( __cfa_time_t alarm ) {
    45         LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %llu\n", (__cfa_time_t)alarm );
    46         itimerval val;
    47         val.it_value.tv_sec = alarm / TIMEGRAN;                 // seconds
    48         val.it_value.tv_usec = max( (alarm % TIMEGRAN) / ( TIMEGRAN / 1_000_000L ), 1000 ); // microseconds
    49         val.it_interval.tv_sec = 0;
    50         val.it_interval.tv_usec = 0;
     81        itimerval val = { &alarm };
    5182        setitimer( ITIMER_REAL, &val, NULL );
    5283}
     
    5687//=============================================================================================
    5788
    58 void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
     89void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
    5990        this->thrd = thrd;
    6091        this->alarm = alarm;
     
    6596}
    6697
    67 void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
     98void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
    6899        this->proc = proc;
    69100        this->alarm = alarm;
     
    153184
    154185void register_self( alarm_node_t * this ) {
     186        alarm_list_t * alarms = &event_kernel->alarms;
     187
    155188        disable_interrupts();
    156         verify( !systemProcessor->pending_alarm );
    157         lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
     189        lock( &event_kernel->lock DEBUG_CTX2 );
    158190        {
    159                 verify( validate( &systemProcessor->alarms ) );
    160                 bool first = !systemProcessor->alarms.head;
    161 
    162                 insert( &systemProcessor->alarms, this );
    163                 if( systemProcessor->pending_alarm ) {
    164                         tick_preemption();
     191                verify( validate( alarms ) );
     192                bool first = !alarms->head;
     193
     194                insert( alarms, this );
     195                if( first ) {
     196                        __kernel_set_timer( alarms->head->alarm - __kernel_get_time() );
    165197                }
    166                 if( first ) {
    167                         __kernel_set_timer( systemProcessor->alarms.head->alarm - __kernel_get_time() );
    168                 }
    169         }
    170         unlock( &systemProcessor->alarm_lock );
     198        }
     199        unlock( &event_kernel->lock );
    171200        this->set = true;
    172201        enable_interrupts( DEBUG_CTX );
     
    174203
    175204void unregister_self( alarm_node_t * this ) {
    176         // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this );
    177205        disable_interrupts();
    178         lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
     206        lock( &event_kernel->lock DEBUG_CTX2 );
    179207        {
    180                 verify( validate( &systemProcessor->alarms ) );
    181                 remove( &systemProcessor->alarms, this );
    182         }
    183         unlock( &systemProcessor->alarm_lock );
     208                verify( validate( &event_kernel->alarms ) );
     209                remove( &event_kernel->alarms, this );
     210        }
     211        unlock( &event_kernel->lock );
    184212        enable_interrupts( DEBUG_CTX );
    185213        this->set = false;
    186         // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this );
    187 }
     214}
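The constructors added above make the conversion from a __cfa_time_t duration to a POSIX timer value a pair of constructor calls, which is the shape __kernel_set_timer now relies on. A minimal usage sketch, assuming only the names introduced in this diff (the local variables are illustrative):

        __cfa_time_t delay = from_ms( 10 );           // 10 ms, stored internally in nanoseconds
        itimerval val = { &delay };                   // __cfa_time_t -> itimerval (seconds + microseconds)
        setitimer( ITIMER_REAL, &val, NULL );         // arm the one-shot real-time timer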
  • src/libcfa/concurrency/alarm.h

    rdab7ac7 r21a5dde1  
    2323#include <assert.h>
    2424
    25 typedef uint64_t __cfa_time_t;
    26 
    2725struct thread_desc;
    2826struct processor;
     27
     28struct timespec;
     29struct itimerval;
     30
     31//=============================================================================================
     32// time type
     33//=============================================================================================
     34
     35struct __cfa_time_t {
     36        uint64_t val;
     37};
     38
     39// ctors
     40void ?{}( __cfa_time_t * this );
     41void ?{}( __cfa_time_t * this, zero_t zero );
     42void ?{}( __cfa_time_t * this, timespec * curr );
     43void ?{}( itimerval * this, __cfa_time_t * alarm );
     44
     45__cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs );
     46
     47// logical ops
     48static inline bool ?==?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val == rhs.val; }
     49static inline bool ?!=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val != rhs.val; }
     50static inline bool ?>? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >  rhs.val; }
     51static inline bool ?<? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <  rhs.val; }
     52static inline bool ?>=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >= rhs.val; }
     53static inline bool ?<=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <= rhs.val; }
     54
     55static inline bool ?==?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val == rhs; }
     56static inline bool ?!=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val != rhs; }
     57static inline bool ?>? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >  rhs; }
     58static inline bool ?<? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <  rhs; }
     59static inline bool ?>=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >= rhs; }
     60static inline bool ?<=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <= rhs; }
     61
      62// addition/subtraction
     63static inline __cfa_time_t ?+?( __cfa_time_t lhs, __cfa_time_t rhs ) {
     64        __cfa_time_t ret;
     65        ret.val = lhs.val + rhs.val;
     66        return ret;
     67}
     68
     69static inline __cfa_time_t ?-?( __cfa_time_t lhs, __cfa_time_t rhs ) {
     70        __cfa_time_t ret;
     71        ret.val = lhs.val - rhs.val;
     72        return ret;
     73}
     74
     75__cfa_time_t from_s ( uint64_t );
     76__cfa_time_t from_ms( uint64_t );
     77__cfa_time_t from_us( uint64_t );
     78__cfa_time_t from_ns( uint64_t );
     79
     80extern __cfa_time_t zero_time;
    2981
    3082//=============================================================================================
    3183// Clock logic
    3284//=============================================================================================
    33 
    34 #define TIMEGRAN 1_000_000_000L                         // nanosecond granularity, except for timeval
    3585
    3686__cfa_time_t __kernel_get_time();
     
    57107typedef alarm_node_t ** __alarm_it_t;
    58108
    59 void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
    60 void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
     109void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
     110void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
    61111void ^?{}( alarm_node_t * this );
    62112
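The operator set declared above is what lets the alarm code compare and subtract times with plain expression syntax rather than raw uint64_t arithmetic. A small sketch under those declarations (the locals are illustrative only):

        __cfa_time_t now  = __kernel_get_time();
        __cfa_time_t when = now + from_us( 500 );     // absolute time 500 microseconds from now
        if( when > now && when != zero_time ) {
                __kernel_set_timer( when - now );     // hand the timer a relative delay
        }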
  • src/libcfa/concurrency/coroutine

    rdab7ac7 r21a5dde1  
    6363
    6464// Get current coroutine
    65 extern volatile thread_local coroutine_desc * this_coroutine;
     65extern thread_local coroutine_desc * volatile this_coroutine;
    6666
    6767// Private wrappers for context switch and stack creation
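The qualifier move above changes which object is volatile: the old spelling declared a pointer to a volatile coroutine descriptor, while the new one declares a volatile per-thread pointer to an ordinary descriptor, presumably because it is the pointer itself that changes on every context switch. A short C illustration of the distinction (reusing the type name for clarity):

        volatile coroutine_desc * p;    // p points at a volatile descriptor; *p is reloaded, p itself may be cached
        coroutine_desc * volatile q;    // q itself is volatile; every read of q goes to memory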
  • src/libcfa/concurrency/coroutine.c

    rdab7ac7 r21a5dde1  
    2626}
    2727
    28 #include "kernel"
    29 #include "libhdr.h"
     28#include "kernel_private.h"
    3029
    3130#define __CFA_INVOKE_PRIVATE__
    3231#include "invoke.h"
    3332
    34 extern volatile thread_local processor * this_processor;
    3533
    3634//-----------------------------------------------------------------------------
  • src/libcfa/concurrency/kernel

    rdab7ac7 r21a5dde1  
    2828//-----------------------------------------------------------------------------
    2929// Locks
    30 bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );
    31 void lock      ( spinlock * DEBUG_CTX_PARAM2 );
    32 void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
    33 void unlock    ( spinlock * );
     30void lock      ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, spin if already acquired
     31void lock_yield( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, yield repeatedly if already acquired
     32bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, return false if already acquired
     33void unlock    ( spinlock * );                        // Unlock the spinlock
    3434
    3535struct semaphore {
     
    4848// Cluster
    4949struct cluster {
    50         __thread_queue_t ready_queue;
    51         spinlock lock;
     50        spinlock ready_queue_lock;                      // Ready queue locks
     51        __thread_queue_t ready_queue;                   // Ready queue for threads
     52        unsigned long long int preemption;              // Preemption rate on this cluster
    5253};
    5354
     
    7677static inline void ^?{}(FinishAction * this) {}
    7778
     79// Processor
     80// Wrapper around kernel threads
    7881struct processor {
    79         struct processorCtx_t * runner;
    80         cluster * cltr;
    81         pthread_t kernel_thread;
     82        // Main state
      83        struct processorCtx_t * runner;                 // Coroutine ctx that keeps the state of the processor
     84        cluster * cltr;                                 // Cluster from which to get threads
     85        pthread_t kernel_thread;                        // Handle to pthreads
    8286
    83         semaphore terminated;
    84         volatile bool is_terminated;
     87        // Termination
      88        volatile bool do_terminate;                     // Set to true to notify the processor it should terminate
     89        semaphore terminated;                           // Termination synchronisation
    8590
    86         struct FinishAction finish;
     91        // RunThread data
      92        struct FinishAction finish;                     // Action to do after a thread is run
    8793
    88         struct alarm_node_t * preemption_alarm;
    89         unsigned int preemption;
     94        // Preemption data
      95        struct alarm_node_t * preemption_alarm;         // Node which is added in the discrete event simulation
      96        bool pending_preemption;                        // If true, a preemption was triggered in an unsafe region, so the processor must preempt as soon as possible
    9097
    91         bool pending_preemption;
    92 
    93         char * last_enable;
     98#ifdef __CFA_DEBUG__
     99        char * last_enable;                             // Last function to enable preemption on this processor
     100#endif
    94101};
    95102
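The lock interface documented above is used throughout the kernel in a simple bracket pattern around each shared structure. A minimal sketch of that pattern, matching the ready-queue usage that appears in kernel.c below (the locals are illustrative):

        lock(   &cltr->ready_queue_lock DEBUG_CTX2 ); // spin until the ready queue is owned
        append( &cltr->ready_queue, thrd );           // critical section: enqueue the thread
        unlock( &cltr->ready_queue_lock );            // release so other processors can schedule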
  • src/libcfa/concurrency/kernel.c

    rdab7ac7 r21a5dde1  
    4242//-----------------------------------------------------------------------------
    4343// Kernel storage
    44 #define KERNEL_STORAGE(T,X) static char X##Storage[sizeof(T)]
    45 
    46 KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
    47 KERNEL_STORAGE(cluster, systemCluster);
    48 KERNEL_STORAGE(system_proc_t, systemProcessor);
    49 KERNEL_STORAGE(thread_desc, mainThread);
     44KERNEL_STORAGE(cluster,           mainCluster);
     45KERNEL_STORAGE(processor,         mainProcessor);
     46KERNEL_STORAGE(processorCtx_t,    mainProcessorCtx);
     47KERNEL_STORAGE(thread_desc,       mainThread);
    5048KERNEL_STORAGE(machine_context_t, mainThreadCtx);
    5149
    52 cluster * systemCluster;
    53 system_proc_t * systemProcessor;
     50cluster *     mainCluster;
     51processor *   mainProcessor;
    5452thread_desc * mainThread;
    5553
     
    5755// Global state
    5856
    59 volatile thread_local processor * this_processor;
    60 volatile thread_local coroutine_desc * this_coroutine;
    61 volatile thread_local thread_desc * this_thread;
     57thread_local coroutine_desc * volatile this_coroutine;
     58thread_local thread_desc *    volatile this_thread;
     59thread_local processor *      volatile this_processor;
     60
    6261volatile thread_local bool preemption_in_progress = 0;
    6362volatile thread_local unsigned short disable_preempt_count = 1;
     
    8584
    8685        this->limit = (void *)(((intptr_t)this->base) - this->size);
    87         this->context = &mainThreadCtxStorage;
     86        this->context = &storage_mainThreadCtx;
    8887        this->top = this->base;
    8988}
     
    125124
    126125void ?{}(processor * this) {
    127         this{ systemCluster };
     126        this{ mainCluster };
    128127}
    129128
     
    131130        this->cltr = cltr;
    132131        (&this->terminated){ 0 };
    133         this->is_terminated = false;
     132        this->do_terminate = false;
    134133        this->preemption_alarm = NULL;
    135         this->preemption = default_preemption();
    136134        this->pending_preemption = false;
    137135
     
    142140        this->cltr = cltr;
    143141        (&this->terminated){ 0 };
    144         this->is_terminated = false;
     142        this->do_terminate = false;
    145143        this->preemption_alarm = NULL;
    146         this->preemption = default_preemption();
    147144        this->pending_preemption = false;
    148145        this->kernel_thread = pthread_self();
    149146
    150147        this->runner = runner;
    151         LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner);
     148        LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", runner);
    152149        runner{ this };
    153150}
    154151
    155 LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    156 
    157 void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
    158         (&this->alarms){};
    159         (&this->alarm_lock){};
    160         this->pending_alarm = false;
    161 
    162         (&this->proc){ cltr, runner };
    163 
    164         verify( validate( &this->alarms ) );
    165 }
    166 
    167152void ^?{}(processor * this) {
    168         if( ! this->is_terminated ) {
     153        if( ! this->do_terminate ) {
    169154                LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this);
    170                 this->is_terminated = true;
     155                this->do_terminate = true;
    171156                P( &this->terminated );
    172157                pthread_join( this->kernel_thread, NULL );
     
    176161void ?{}(cluster * this) {
    177162        ( &this->ready_queue ){};
    178         ( &this->lock ){};
     163        ( &this->ready_queue_lock ){};
     164
     165        this->preemption = default_preemption();
    179166}
    180167
     
    199186
    200187                thread_desc * readyThread = NULL;
    201                 for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
     188                for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
    202189                {
    203190                        readyThread = nextThread( this->cltr );
     
    343330        verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    344331
    345         lock( &systemProcessor->proc.cltr->lock DEBUG_CTX2 );
    346         append( &systemProcessor->proc.cltr->ready_queue, thrd );
    347         unlock( &systemProcessor->proc.cltr->lock );
     332        lock(   &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
     333        append( &this_processor->cltr->ready_queue, thrd );
     334        unlock( &this_processor->cltr->ready_queue_lock );
    348335
    349336        verify( disable_preempt_count > 0 );
     
    352339thread_desc * nextThread(cluster * this) {
    353340        verify( disable_preempt_count > 0 );
    354         lock( &this->lock DEBUG_CTX2 );
     341        lock( &this->ready_queue_lock DEBUG_CTX2 );
    355342        thread_desc * head = pop_head( &this->ready_queue );
    356         unlock( &this->lock );
     343        unlock( &this->ready_queue_lock );
    357344        verify( disable_preempt_count > 0 );
    358345        return head;
     
    452439        // Start by initializing the main thread
    453440        // SKULLDUGGERY: the mainThread steals the process main thread
    454         // which will then be scheduled by the systemProcessor normally
    455         mainThread = (thread_desc *)&mainThreadStorage;
     441        // which will then be scheduled by the mainProcessor normally
     442        mainThread = (thread_desc *)&storage_mainThread;
    456443        current_stack_info_t info;
    457444        mainThread{ &info };
     
    459446        LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");
    460447
    461         // Initialize the system cluster
    462         systemCluster = (cluster *)&systemClusterStorage;
    463         systemCluster{};
    464 
    465         LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");
    466 
    467         // Initialize the system processor and the system processor ctx
     448        // Initialize the main cluster
     449        mainCluster = (cluster *)&storage_mainCluster;
     450        mainCluster{};
     451
     452        LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n");
     453
     454        // Initialize the main processor and the main processor ctx
    468455        // (the coroutine that contains the processing control flow)
    469         systemProcessor = (system_proc_t *)&systemProcessorStorage;
    470         systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtxStorage };
    471 
    472         // Add the main thread to the ready queue
    473         // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
    474         ScheduleThread(mainThread);
     456        mainProcessor = (processor *)&storage_mainProcessor;
     457        mainProcessor{ mainCluster, (processorCtx_t *)&storage_mainProcessorCtx };
    475458
    476459        //initialize the global state variables
    477         this_processor = &systemProcessor->proc;
     460        this_processor = mainProcessor;
    478461        this_thread = mainThread;
    479462        this_coroutine = &mainThread->cor;
    480         disable_preempt_count = 1;
    481463
    482464        // Enable preemption
    483465        kernel_start_preemption();
    484466
    485         // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
     467        // Add the main thread to the ready queue
     468        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
     469        ScheduleThread(mainThread);
     470
     471        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    486472        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    487473        // mainThread is on the ready queue when this call is made.
    488         resume( systemProcessor->proc.runner );
     474        resume( mainProcessor->runner );
    489475
    490476
     
    501487        disable_interrupts();
    502488
    503         // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
      489        // SKULLDUGGERY: Notify the mainProcessor it needs to terminate.
    504490        // When its coroutine terminates, it return control to the mainThread
    505491        // which is currently here
    506         systemProcessor->proc.is_terminated = true;
     492        mainProcessor->do_terminate = true;
    507493        suspend();
    508494
     
    512498        kernel_stop_preemption();
    513499
    514         // Destroy the system processor and its context in reverse order of construction
     500        // Destroy the main processor and its context in reverse order of construction
    515501        // These were manually constructed so we need manually destroy them
    516         ^(systemProcessor->proc.runner){};
    517         ^(systemProcessor){};
     502        ^(mainProcessor->runner){};
     503        ^(mainProcessor){};
    518504
    519505        // Final step, destroy the main thread since it is no longer needed
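The renamed static buffers above come from the KERNEL_STORAGE macro, now defined in kernel_private.h, which reserves raw, unconstructed storage for the kernel's bootstrap objects. The pattern, sketched from the startup code above, is to cast the bytes and then run the constructor in place:

        KERNEL_STORAGE(thread_desc, mainThread);          // expands to: static char storage_mainThread[sizeof(thread_desc)]
        mainThread = (thread_desc *)&storage_mainThread;  // later, in kernel_startup: reuse the raw bytes
        mainThread{ &info };                              // run the constructor in place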
  • src/libcfa/concurrency/kernel_private.h

    rdab7ac7 r21a5dde1  
    3131extern "C" {
    3232        void disable_interrupts();
    33         void enable_interrupts_noRF();
     33        void enable_interrupts_noPoll();
    3434        void enable_interrupts( DEBUG_CTX_PARAM );
    3535}
     
    4545thread_desc * nextThread(cluster * this);
    4646
      47// Block the current thread and release/wake-up the following resources
    4748void BlockInternal(void);
    4849void BlockInternal(spinlock * lock);
     
    6566void spin(processor * this, unsigned int * spin_count);
    6667
    67 struct system_proc_t {
    68         processor proc;
    69 
     68struct event_kernel_t {
    7069        alarm_list_t alarms;
    71         spinlock alarm_lock;
    72 
    73         bool pending_alarm;
     70        spinlock lock;
    7471};
    7572
    76 extern cluster * systemCluster;
    77 extern system_proc_t * systemProcessor;
    78 extern volatile thread_local processor * this_processor;
    79 extern volatile thread_local coroutine_desc * this_coroutine;
    80 extern volatile thread_local thread_desc * this_thread;
     73extern event_kernel_t * event_kernel;
     74
     75extern thread_local coroutine_desc * volatile this_coroutine;
     76extern thread_local thread_desc *    volatile this_thread;
     77extern thread_local processor *      volatile this_processor;
     78
    8179extern volatile thread_local bool preemption_in_progress;
    8280extern volatile thread_local unsigned short disable_preempt_count;
     
    9189extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
    9290
     91//-----------------------------------------------------------------------------
     92// Utils
     93#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
     94
    9395#endif //KERNEL_PRIVATE_H
    9496
  • src/libcfa/concurrency/preemption.c

    rdab7ac7 r21a5dde1  
    3434#endif
    3535
     36//TODO move to defaults
    3637#define __CFA_DEFAULT_PREEMPTION__ 10000
    3738
     39//TODO move to defaults
    3840__attribute__((weak)) unsigned int default_preemption() {
    3941        return __CFA_DEFAULT_PREEMPTION__;
    4042}
    4143
     44// Short hands for signal context information
    4245#define __CFA_SIGCXT__ ucontext_t *
    4346#define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt
    4447
     48// FwdDeclarations : timeout handlers
    4549static void preempt( processor   * this );
    4650static void timeout( thread_desc * this );
    4751
     52// FwdDeclarations : Signal handlers
    4853void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
    49 void sigHandler_alarm    ( __CFA_SIGPARMS__ );
    5054void sigHandler_segv     ( __CFA_SIGPARMS__ );
    5155void sigHandler_abort    ( __CFA_SIGPARMS__ );
    5256
     57// FwdDeclarations : sigaction wrapper
    5358static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
    54 LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    55 
     59
     60// FwdDeclarations : alarm thread main
     61void * alarm_loop( __attribute__((unused)) void * args );
     62
     63// Machine specific register name
    5664#ifdef __x86_64__
    5765#define CFA_REG_IP REG_RIP
     
    6068#endif
    6169
     70KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
      71event_kernel_t * event_kernel;                        // kernel public handle to the event kernel
     72static pthread_t alarm_thread;                        // pthread handle to alarm thread
     73
     74void ?{}(event_kernel_t * this) {
     75        (&this->alarms){};
     76        (&this->lock){};
     77}
    6278
    6379//=============================================================================================
     
    6581//=============================================================================================
    6682
     83// Get next expired node
     84static inline alarm_node_t * get_expired( alarm_list_t * alarms, __cfa_time_t currtime ) {
     85        if( !alarms->head ) return NULL;                          // If no alarms return null
     86        if( alarms->head->alarm >= currtime ) return NULL;        // If alarms head not expired return null
     87        return pop(alarms);                                       // Otherwise just pop head
     88}
     89
     90// Tick one frame of the Discrete Event Simulation for alarms
    6791void tick_preemption() {
    68         alarm_list_t * alarms = &systemProcessor->alarms;
    69         __cfa_time_t currtime = __kernel_get_time();
    70 
    71         // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption @ %llu\n", currtime );
    72         while( alarms->head && alarms->head->alarm < currtime ) {
    73                 alarm_node_t * node = pop(alarms);
    74                 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
    75 
     92        alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
     93        alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
      94        __cfa_time_t currtime = __kernel_get_time();    // Check current time once so everything "happens at once"
     95
      96        // Loop through everything that has expired
     97        while( node = get_expired( alarms, currtime ) ) {
     98
      99                // Check if this is a kernel alarm
    76100                if( node->kernel_alarm ) {
    77101                        preempt( node->proc );
     
    81105                }
    82106
    83                 verify( validate( alarms ) );
    84 
     107                // Check if this is a periodic alarm
    85108                __cfa_time_t period = node->period;
    86109                if( period > 0 ) {
    87                         node->alarm = currtime + period;
    88                         // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Reinsert %p @ %llu (%llu + %llu)\n", node, node->alarm, currtime, period );
    89                         insert( alarms, node );
      110                        node->alarm = currtime + period;    // Alarm is periodic, compute the next deadline from the cached current time
     111                        insert( alarms, node );             // Reinsert the node for the next time it triggers
    90112                }
    91113                else {
    92                         node->set = false;
    93                 }
    94         }
    95 
    96         if( alarms->head ) {
    97                 __kernel_set_timer( alarms->head->alarm - currtime );
    98         }
    99 
    100         verify( validate( alarms ) );
    101         // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
    102 }
    103 
     114                        node->set = false;                  // Node is one-shot, just mark it as not pending
     115                }
     116        }
     117
     118        // If there are still alarms pending, reset the timer
     119        if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); }
     120}
     121
     122// Update the preemption of a processor and notify interested parties
    104123void update_preemption( processor * this, __cfa_time_t duration ) {
    105         LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %llu\n", this, duration );
    106 
    107124        alarm_node_t * alarm = this->preemption_alarm;
    108         duration *= 1000;
    109125
    110126        // Alarms need to be enabled
     
    136152
    137153extern "C" {
     154        // Disable interrupts by incrementing the counter
    138155        void disable_interrupts() {
    139156                __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
    140                 verify( new_val < (unsigned short)65_000 );
    141                 verify( new_val != (unsigned short) 0 );
    142         }
    143 
    144         void enable_interrupts_noRF() {
    145                 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    146                 verify( prev != (unsigned short) 0 );
    147         }
    148 
     157                verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
     158        }
     159
     160        // Enable interrupts by decrementing the counter
     161        // If counter reaches 0, execute any pending CtxSwitch
    149162        void enable_interrupts( DEBUG_CTX_PARAM ) {
    150                 processor * proc   = this_processor;
    151                 thread_desc * thrd = this_thread;
     163                processor * proc   = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
     164                thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
     165
    152166                unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    153                 verify( prev != (unsigned short) 0 );
      167                verify( prev != 0u );                     // If this triggers someone is enabling interrupts that are already enabled
     168
      169                // Check if we need to preempt the thread because an interrupt was missed
    154170                if( prev == 1 && proc->pending_preemption ) {
    155171                        proc->pending_preemption = false;
     
    157173                }
    158174
     175                // For debugging purposes : keep track of the last person to enable the interrupts
    159176                LIB_DEBUG_DO( proc->last_enable = caller; )
    160177        }
    161 }
    162 
     178
      179        // Enable interrupts by decrementing the counter
     180        // Don't execute any pending CtxSwitch even if counter reaches 0
     181        void enable_interrupts_noPoll() {
     182                __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
      183                verify( prev != 0u );                     // If this triggers someone is enabling interrupts that are already enabled
     184        }
     185}
     186
     187// sigprocmask wrapper : unblock a single signal
    163188static inline void signal_unblock( int sig ) {
    164189        sigset_t mask;
     
    171196}
    172197
     198// sigprocmask wrapper : block a single signal
    173199static inline void signal_block( int sig ) {
    174200        sigset_t mask;
     
    181207}
    182208
    183 static inline bool preemption_ready() {
    184         return disable_preempt_count == 0 && !preemption_in_progress;
    185 }
    186 
    187 static inline void defer_ctxSwitch() {
    188         this_processor->pending_preemption = true;
    189 }
    190 
    191 static inline void defer_alarm() {
    192         systemProcessor->pending_alarm = true;
    193 }
    194 
     209// kill wrapper : signal a processor
    195210static void preempt( processor * this ) {
    196211        pthread_kill( this->kernel_thread, SIGUSR1 );
    197212}
    198213
     214// reserved for future use
    199215static void timeout( thread_desc * this ) {
    200216        //TODO : implement waking threads
    201217}
    202218
     219
      220// Check if a CtxSwitch signal handler should defer
     221// If true  : preemption is safe
     222// If false : preemption is unsafe and marked as pending
     223static inline bool preemption_ready() {
     224        bool ready = disable_preempt_count == 0 && !preemption_in_progress; // Check if preemption is safe
     225        this_processor->pending_preemption = !ready;                        // Adjust the pending flag accordingly
     226        return ready;
     227}
     228
    203229//=============================================================================================
    204230// Kernel Signal Startup/Shutdown logic
    205231//=============================================================================================
    206232
    207 static pthread_t alarm_thread;
    208 void * alarm_loop( __attribute__((unused)) void * args );
    209 
     233// Startup routine to activate preemption
     234// Called from kernel_startup
    210235void kernel_start_preemption() {
    211236        LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
    212         __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
    213         // __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );
    214         // __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );
     237
     238        // Start with preemption disabled until ready
     239        disable_preempt_count = 1;
     240
     241        // Initialize the event kernel
     242        event_kernel = (event_kernel_t *)&storage_event_kernel;
     243        event_kernel{};
     244
     245        // Setup proper signal handlers
     246        __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );         // CtxSwitch handler
     247        // __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );      // Failure handler
     248        // __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );      // Failure handler
    215249
    216250        signal_block( SIGALRM );
     
    219253}
    220254
     255// Shutdown routine to deactivate preemption
     256// Called from kernel_shutdown
    221257void kernel_stop_preemption() {
    222258        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n");
    223259
     260        // Block all signals since we are already shutting down
    224261        sigset_t mask;
    225262        sigfillset( &mask );
    226263        sigprocmask( SIG_BLOCK, &mask, NULL );
    227264
     265        // Notify the alarm thread of the shutdown
    228266        sigval val = { 1 };
    229267        pthread_sigqueue( alarm_thread, SIGALRM, val );
     268
     269        // Wait for the preemption thread to finish
    230270        pthread_join( alarm_thread, NULL );
     271
     272        // Preemption is now fully stopped
     273
    231274        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
    232275}
    233276
      277// RAII ctor/dtor for the preemption_scope
      278// Used by threads to control when they want to receive preemption signals
    234279void ?{}( preemption_scope * this, processor * proc ) {
    235         (&this->alarm){ proc };
     280        (&this->alarm){ proc, zero_time, zero_time };
    236281        this->proc = proc;
    237282        this->proc->preemption_alarm = &this->alarm;
    238         update_preemption( this->proc, this->proc->preemption );
     283
     284        update_preemption( this->proc, from_us(this->proc->cltr->preemption) );
    239285}
    240286
     
    242288        disable_interrupts();
    243289
    244         update_preemption( this->proc, 0 );
     290        update_preemption( this->proc, zero_time );
    245291}
    246292
     
    249295//=============================================================================================
    250296
     297// Context switch signal handler
     298// Receives SIGUSR1 signal and causes the current thread to yield
    251299void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
    252300        LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )
    253         if( preemption_ready() ) {
    254                 preemption_in_progress = true;
    255                 signal_unblock( SIGUSR1 );
    256                 this_processor->pending_preemption = false;
    257                 preemption_in_progress = false;
    258                 BlockInternal( (thread_desc*)this_thread );
    259         }
    260         else {
    261                 defer_ctxSwitch();
    262         }
    263 }
    264 
     301
     302        // Check if it is safe to preempt here
     303        if( !preemption_ready() ) { return; }
     304
     305        preemption_in_progress = true;                      // Sync flag : prevent recursive calls to the signal handler
     306        signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
     307        preemption_in_progress = false;                     // Clear the in progress flag
     308
     309        // Preemption can occur here
     310
     311        BlockInternal( (thread_desc*)this_thread );         // Do the actual CtxSwitch
     312}
     313
     314// Main of the alarm thread
      315// Waits on SIGALRM and sends SIGUSR1 to whoever needs it
    265316void * alarm_loop( __attribute__((unused)) void * args ) {
     317        // Block sigalrms to control when they arrive
    266318        sigset_t mask;
    267319        sigemptyset( &mask );
     
    272324        }
    273325
     326        // Main loop
    274327        while( true ) {
     328                // Wait for a sigalrm
    275329                siginfo_t info;
    276330                int sig = sigwaitinfo( &mask, &info );
     331
      332                // If another signal arrived, something went wrong
    277333                assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);
    278334
    279335                LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
      336                // Switch on the code (a.k.a. the sender) to decide what to do
    280337                switch( info.si_code )
    281338                {
      339                // Timers can apparently be marked as sent by the kernel
     340                // In either case, tick preemption
    282341                case SI_TIMER:
    283342                case SI_KERNEL:
    284343                        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
    285                         lock( &systemProcessor->alarm_lock DEBUG_CTX2 );
     344                        lock( &event_kernel->lock DEBUG_CTX2 );
    286345                        tick_preemption();
    287                         unlock( &systemProcessor->alarm_lock );
     346                        unlock( &event_kernel->lock );
    288347                        break;
      348                // Signal was not sent by the kernel but by another thread
    289349                case SI_QUEUE:
      350                        // For now, other threads only signal the alarm thread to shut it down
      351                        // If this needs to change, use info.si_value and handle the case here
    290352                        goto EXIT;
    291353                }
     
    297359}
    298360
      361// Sigaction wrapper : register a signal handler
    299362static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
    300363        struct sigaction act;
     
    312375}
    313376
    314 typedef void (*sa_handler_t)(int);
    315 
     377// Sigaction wrapper : restore default handler
    316378static void __kernel_sigdefault( int sig ) {
    317379        struct sigaction act;
    318380
    319         // act.sa_handler = SIG_DFL;
     381        act.sa_handler = SIG_DFL;
    320382        act.sa_flags = 0;
    321383        sigemptyset( &act.sa_mask );
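All of the preemption machinery above hinges on the per-thread disable_preempt_count: interrupts are logically disabled whenever the counter is non-zero, and a SIGUSR1 that arrives in that window only sets pending_preemption instead of context switching. A sketch of the calling pattern from the kernel's side (the commented body is illustrative):

        disable_interrupts();                 // counter 0 -> 1; CtxSwitch signals are deferred from here
        // ... touch scheduler state that must not be preempted ...
        enable_interrupts( DEBUG_CTX );       // counter 1 -> 0; if a preemption was deferred,
                                              // pending_preemption triggers the yield now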
  • src/libcfa/concurrency/thread

    rdab7ac7 r21a5dde1  
    5454}
    5555
    56 extern volatile thread_local thread_desc * this_thread;
     56extern thread_local thread_desc * volatile this_thread;
    5757
    5858forall( dtype T | is_thread(T) )
  • src/libcfa/concurrency/thread.c

    rdab7ac7 r21a5dde1  
    8787
    8888void yield( void ) {
    89         BlockInternal( (thread_desc *)this_thread );
     89        BlockInternal( this_thread );
    9090}
    9191