Changeset 969b3fe for src/libcfa


Timestamp:  Jul 18, 2017, 1:05:13 PM (7 years ago)
Author:     Thierry Delisle <tdelisle@…>
Branches:   ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:   a57cb58
Parents:    5bd0aad
Message:    More clean-up of the kernel code
Location:   src/libcfa/concurrency
Files:      5 edited

Legend: unchanged lines are shown plain, removed lines are prefixed with -, added lines with +, and … marks omitted context.
  • src/libcfa/concurrency/alarm.c

    r5bd0aad → r969b3fe

      //=============================================================================================
    + // time type
    + //=============================================================================================
    +
    + #define one_second         1_000_000_000ul
    + #define one_milisecond         1_000_000ul
    + #define one_microsecond            1_000ul
    + #define one_nanosecond                 1ul
    +
    + __cfa_time_t zero_time = { 0 };
    +
    + void ?{}( __cfa_time_t * this ) { this->val = 0; }
    + void ?{}( __cfa_time_t * this, zero_t zero ) { this->val = 0; }
    +
    + void ?{}( itimerval * this, __cfa_time_t * alarm ) {
    +         this->it_value.tv_sec = alarm->val / one_second;                        // seconds
    +         this->it_value.tv_usec = max( (alarm->val % one_second) / one_microsecond, 1000 ); // microseconds
    +         this->it_interval.tv_sec = 0;
    +         this->it_interval.tv_usec = 0;
    + }
    +
    +
    + void ?{}( __cfa_time_t * this, timespec * curr ) {
    +         uint64_t secs  = curr->tv_sec;
    +         uint64_t nsecs = curr->tv_nsec;
    +         this->val = (secs * one_second) + nsecs;
    + }
    +
    + __cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs ) {
    +         this->val = 0;
    +         return *this;
    + }
    +
    + __cfa_time_t from_s ( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000_000ul; return ret; }
    + __cfa_time_t from_ms( uint64_t val ) { __cfa_time_t ret; ret.val = val *     1_000_000ul; return ret; }
    + __cfa_time_t from_us( uint64_t val ) { __cfa_time_t ret; ret.val = val *         1_000ul; return ret; }
    + __cfa_time_t from_ns( uint64_t val ) { __cfa_time_t ret; ret.val = val *             1ul; return ret; }
    +
    + //=============================================================================================
      // Clock logic
      //=============================================================================================
    …
              timespec curr;
              clock_gettime( CLOCK_REALTIME, &curr );
    -         __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
    -         // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
    -         return curr_time;
    +         return (__cfa_time_t){ &curr };
      }

      void __kernel_set_timer( __cfa_time_t alarm ) {
    -         LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %llu\n", (__cfa_time_t)alarm );
    -         itimerval val;
    -         val.it_value.tv_sec = alarm / TIMEGRAN;                 // seconds
    -         val.it_value.tv_usec = max( (alarm % TIMEGRAN) / ( TIMEGRAN / 1_000_000L ), 1000 ); // microseconds
    -         val.it_interval.tv_sec = 0;
    -         val.it_interval.tv_usec = 0;
    +         itimerval val = { &alarm };
              setitimer( ITIMER_REAL, &val, NULL );
      }
    …
      //=============================================================================================

    - void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
    + void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
              this->thrd = thrd;
              this->alarm = alarm;
    …
      }

    - void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
    + void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
              this->proc = proc;
              this->alarm = alarm;
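
The new constructors above replace the old open-coded TIMEGRAN arithmetic: __kernel_get_time now builds a __cfa_time_t directly from a timespec, and __kernel_set_timer builds the itimerval directly from the alarm value. As a rough plain-C sketch of the same two conversions (cfa_time_ns_t and the helper names below are stand-ins, not part of the changeset):

// Plain-C sketch, not part of the changeset; cfa_time_ns_t and the two
// helpers are hypothetical stand-ins for __cfa_time_t and its constructors.
#include <stdint.h>
#include <sys/time.h>                               // struct itimerval
#include <time.h>                                   // struct timespec

typedef struct { uint64_t val; } cfa_time_ns_t;     // nanosecond count

#define ONE_SECOND      1000000000ull
#define ONE_MICROSECOND 1000ull

// timespec -> nanoseconds, mirroring ?{}( __cfa_time_t *, timespec * )
static cfa_time_ns_t time_from_timespec( const struct timespec * ts ) {
        cfa_time_ns_t t;
        t.val = (uint64_t)ts->tv_sec * ONE_SECOND + (uint64_t)ts->tv_nsec;
        return t;
}

// nanoseconds -> one-shot itimerval, mirroring ?{}( itimerval *, __cfa_time_t * )
static struct itimerval itimerval_from_time( cfa_time_ns_t t ) {
        struct itimerval val;
        uint64_t usec = (t.val % ONE_SECOND) / ONE_MICROSECOND;
        if ( usec < 1000 ) usec = 1000;                   // floor of 1 ms, like max( ..., 1000 ) above
        val.it_value.tv_sec     = t.val / ONE_SECOND;     // whole seconds
        val.it_value.tv_usec    = usec;                   // remaining microseconds
        val.it_interval.tv_sec  = 0;                      // one-shot: no automatic reload
        val.it_interval.tv_usec = 0;
        return val;
}

The 1 ms floor mirrors the max( ..., 1000 ) in the CFA constructor, so the timer is never armed with a sub-millisecond value.
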
  • src/libcfa/concurrency/alarm.h

    r5bd0aad → r969b3fe

      #include "assert"

    - typedef uint64_t __cfa_time_t;
    -
      struct thread_desc;
      struct processor;
    +
    + struct timespec;
    + struct itimerval;
    +
    + //=============================================================================================
    + // time type
    + //=============================================================================================
    +
    + struct __cfa_time_t {
    +         uint64_t val;
    + };
    +
    + // ctors
    + void ?{}( __cfa_time_t * this );
    + void ?{}( __cfa_time_t * this, zero_t zero );
    + void ?{}( __cfa_time_t * this, timespec * curr );
    + void ?{}( itimerval * this, __cfa_time_t * alarm );
    +
    + __cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs );
    +
    + // logical ops
    + static inline bool ?==?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val == rhs.val; }
    + static inline bool ?!=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val != rhs.val; }
    + static inline bool ?>? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >  rhs.val; }
    + static inline bool ?<? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <  rhs.val; }
    + static inline bool ?>=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >= rhs.val; }
    + static inline bool ?<=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <= rhs.val; }
    +
    + static inline bool ?==?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val == rhs; }
    + static inline bool ?!=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val != rhs; }
    + static inline bool ?>? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >  rhs; }
    + static inline bool ?<? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <  rhs; }
    + static inline bool ?>=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >= rhs; }
    + static inline bool ?<=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <= rhs; }
    +
    + // addition/substract
    + static inline __cfa_time_t ?+?( __cfa_time_t lhs, __cfa_time_t rhs ) {
    +         __cfa_time_t ret;
    +         ret.val = lhs.val + rhs.val;
    +         return ret;
    + }
    +
    + static inline __cfa_time_t ?-?( __cfa_time_t lhs, __cfa_time_t rhs ) {
    +         __cfa_time_t ret;
    +         ret.val = lhs.val - rhs.val;
    +         return ret;
    + }
    +
    + __cfa_time_t from_s ( uint64_t );
    + __cfa_time_t from_ms( uint64_t );
    + __cfa_time_t from_us( uint64_t );
    + __cfa_time_t from_ns( uint64_t );
    +
    + extern __cfa_time_t zero_time;

      //=============================================================================================
      // Clock logic
      //=============================================================================================
    -
    - #define TIMEGRAN 1_000_000_000L                         // nanosecond granularity, except for timeval

      __cfa_time_t __kernel_get_time();
    …
      typedef alarm_node_t ** __alarm_it_t;

    - void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
    - void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
    + void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
    + void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
      void ^?{}( alarm_node_t * this );

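
The header change is the heart of this clean-up: __cfa_time_t goes from a bare typedef of uint64_t to a distinct struct, so times must be built through from_s/ms/us/ns and combined through the overloaded ?+?, ?-? and comparison operators rather than raw integer arithmetic. A plain-C sketch of the same "strong typedef" idea (all names below are hypothetical; CFA's operator overloading lets the real code keep ordinary +, - and < syntax):

// Plain-C sketch of the "strong typedef" idea; names are hypothetical.
#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t val; } cfa_time_ns_t;     // wraps the raw nanosecond count

static inline cfa_time_ns_t time_from_ms( uint64_t ms ) { return (cfa_time_ns_t){ ms * 1000000ull }; }
static inline cfa_time_ns_t time_from_us( uint64_t us ) { return (cfa_time_ns_t){ us * 1000ull }; }

static inline bool          time_lt ( cfa_time_ns_t a, cfa_time_ns_t b ) { return a.val < b.val; }
static inline cfa_time_ns_t time_add( cfa_time_ns_t a, cfa_time_ns_t b ) { return (cfa_time_ns_t){ a.val + b.val }; }

static void set_alarm( cfa_time_ns_t when ) { (void)when; }   // stand-in consumer of a time value

int main( void ) {
        set_alarm( time_from_ms( 10 ) );                      // caller must name the unit; set_alarm( 10 ) no longer type-checks
        cfa_time_ns_t deadline = time_add( time_from_ms( 10 ), time_from_us( 500 ) );
        return time_lt( time_from_us( 1 ), deadline ) ? 0 : 1;
}

With the wrapper in place a call site has to say which unit it means, which is exactly what the preemption.c hunk further down does with from_us( this->proc->cltr->preemption ).
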
  • src/libcfa/concurrency/kernel.c

    r5bd0aad → r969b3fe

      //-----------------------------------------------------------------------------
      // Kernel storage
    - #define KERNEL_STORAGE(T,X) static char X##Storage[sizeof(T)]
    -
    - KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
    - KERNEL_STORAGE(cluster, systemCluster);
    - KERNEL_STORAGE(system_proc_t, systemProcessor);
    - KERNEL_STORAGE(event_kernel_t, event_kernel);
    - KERNEL_STORAGE(thread_desc, mainThread);
    + KERNEL_STORAGE(cluster,           mainCluster);
    + KERNEL_STORAGE(processor,         mainProcessor);
    + KERNEL_STORAGE(processorCtx_t,    mainProcessorCtx);
    + KERNEL_STORAGE(thread_desc,       mainThread);
      KERNEL_STORAGE(machine_context_t, mainThreadCtx);

    - cluster * systemCluster;
    - system_proc_t * systemProcessor;
    - event_kernel_t * event_kernel;
    + cluster *     mainCluster;
    + processor *   mainProcessor;
      thread_desc * mainThread;

    …
      // Global state

    - volatile thread_local processor * this_processor;
      volatile thread_local coroutine_desc * this_coroutine;
      volatile thread_local thread_desc * this_thread;
    + volatile thread_local processor * this_processor;
    +
      volatile thread_local bool preemption_in_progress = 0;
      volatile thread_local unsigned short disable_preempt_count = 1;
    …

              this->limit = (void *)(((intptr_t)this->base) - this->size);
    -         this->context = &mainThreadCtxStorage;
    +         this->context = &storage_mainThreadCtx;
              this->top = this->base;
      }
    …

      void ?{}(processor * this) {
    -         this{ systemCluster };
    +         this{ mainCluster };
      }

    …

              this->runner = runner;
    -         LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner);
    +         LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", runner);
              runner{ this };
    - }
    -
    - LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    -
    - void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
    -         (&this->proc){ cltr, runner };
    - }
    -
    - void ?{}(event_kernel_t * this) {
    -         (&this->alarms){};
    -         (&this->lock){};
    -
    -         verify( validate( &this->alarms ) );
      }

    …
              verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

    -         lock( &systemProcessor->proc.cltr->ready_queue_lock DEBUG_CTX2 );
    -         append( &systemProcessor->proc.cltr->ready_queue, thrd );
    -         unlock( &systemProcessor->proc.cltr->ready_queue_lock );
    +         lock(   &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
    +         append( &this_processor->cltr->ready_queue, thrd );
    +         unlock( &this_processor->cltr->ready_queue_lock );

              verify( disable_preempt_count > 0 );
    …
              // Start by initializing the main thread
              // SKULLDUGGERY: the mainThread steals the process main thread
    -         // which will then be scheduled by the systemProcessor normally
    -         mainThread = (thread_desc *)&mainThreadStorage;
    +         // which will then be scheduled by the mainProcessor normally
    +         mainThread = (thread_desc *)&storage_mainThread;
              current_stack_info_t info;
              mainThread{ &info };
    …
              LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");

    -         // Initialize the system cluster
    -         systemCluster = (cluster *)&systemClusterStorage;
    -         systemCluster{};
    -
    -         LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");
    -
    -         // Initialize the system processor and the system processor ctx
    +         // Initialize the main cluster
    +         mainCluster = (cluster *)&storage_mainCluster;
    +         mainCluster{};
    +
    +         LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n");
    +
    +         // Initialize the main processor and the main processor ctx
              // (the coroutine that contains the processing control flow)
    -         systemProcessor = (system_proc_t *)&systemProcessorStorage;
    -         systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtxStorage };
    -
    -         // Initialize the event kernel
    -         event_kernel = (event_kernel_t *)&event_kernelStorage;
    -         event_kernel{};
    -
    -         // Add the main thread to the ready queue
    -         // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
    -         ScheduleThread(mainThread);
    +         mainProcessor = (processor *)&storage_mainProcessor;
    +         mainProcessor{ mainCluster, (processorCtx_t *)&storage_mainProcessorCtx };

              //initialize the global state variables
    -         this_processor = &systemProcessor->proc;
    +         this_processor = mainProcessor;
              this_thread = mainThread;
              this_coroutine = &mainThread->cor;
    -         disable_preempt_count = 1;

              // Enable preemption
              kernel_start_preemption();

    -         // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
    +         // Add the main thread to the ready queue
    +         // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    +         ScheduleThread(mainThread);
    +
    +         // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
              // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
              // mainThread is on the ready queue when this call is made.
    -         resume( systemProcessor->proc.runner );
    +         resume( mainProcessor->runner );


    …
              disable_interrupts();

    -         // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
    +         // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
              // When its coroutine terminates, it return control to the mainThread
              // which is currently here
    -         systemProcessor->proc.do_terminate = true;
    +         mainProcessor->do_terminate = true;
              suspend();

    …
              kernel_stop_preemption();

    -         // Destroy the system processor and its context in reverse order of construction
    +         // Destroy the main processor and its context in reverse order of construction
              // These were manually constructed so we need manually destroy them
    -         ^(systemProcessor->proc.runner){};
    -         ^(systemProcessor){};
    +         ^(mainProcessor->runner){};
    +         ^(mainProcessor){};

              // Final step, destroy the main thread since it is no longer needed
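
The startup path keeps the same pattern as before, just renamed: storage for the main cluster, processor and thread is reserved statically with KERNEL_STORAGE and the objects are constructed in place once the runtime is far enough along, since nothing can be heap-allocated this early. A plain-C sketch of that reserve-then-construct pattern (the type and function names are stand-ins for the CFA types and ?{} constructors):

// Plain-C sketch of the reserve-then-construct pattern; processor_t and the
// init/startup functions are hypothetical stand-ins for the CFA code.
#include <stddef.h>

typedef struct { int id; /* ... */ } processor_t;

// Same shape as the macro in the diff; a production version would also want to
// force alignment (e.g. _Alignas(T)), which is glossed over here.
#define KERNEL_STORAGE(T, X) static char storage_##X[sizeof(T)]

KERNEL_STORAGE(processor_t, mainProcessor);      // raw bytes, no constructor run yet
static processor_t * mainProcessor;              // public handle, filled in during startup

static void processor_init( processor_t * this, int id ) {   // plays the role of ?{}(processor *)
        this->id = id;
}

static void kernel_startup( void ) {
        mainProcessor = (processor_t *)&storage_mainProcessor;   // point the handle at the reserved storage
        processor_init( mainProcessor, 0 );                      // construct in place, no malloc needed
}

int main( void ) {
        kernel_startup();
        return mainProcessor->id;
}
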
  • src/libcfa/concurrency/kernel_private.h

    r5bd0aad → r969b3fe

      extern "C" {
              void disable_interrupts();
    -         void enable_interrupts_noRF();
    +         void enable_interrupts_noPoll();
              void enable_interrupts( DEBUG_CTX_PARAM );
      }
    …
      void spin(processor * this, unsigned int * spin_count);

    - struct system_proc_t {
    -         processor proc;
    - };
    -
      struct event_kernel_t {
              alarm_list_t alarms;
    …
      };

    - extern cluster * systemCluster;
    - extern system_proc_t * systemProcessor;
      extern event_kernel_t * event_kernel;

    …
      extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);

    + //-----------------------------------------------------------------------------
    + // Utils
    + #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
    +
      #endif //KERNEL_PRIVATE_H

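
kernel_private.h now owns the KERNEL_STORAGE macro (so preemption.c can reserve the event kernel's storage itself) and renames enable_interrupts_noRF to enable_interrupts_noPoll. The sketch below shows, in plain C with C11 atomics rather than the GCC __atomic_* builtins used in the diff, the counting discipline those three declarations imply; all names are illustrative, and pending_preemption is a per-processor field in the real kernel rather than a bare thread-local flag:

// Plain-C sketch (hypothetical) of the disable/enable counting discipline.
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static _Thread_local atomic_ushort disable_preempt_count = 1;   // > 0 means preemption is off
static _Thread_local bool pending_preemption = false;           // a preemption arrived while disabled

static void do_deferred_yield( void ) { /* BlockInternal(...) in the real kernel */ }

static void disable_interrupts( void ) {
        unsigned short new_val = atomic_fetch_add( &disable_preempt_count, 1 ) + 1;
        assert( new_val < 65000 );                     // a huge count means unbalanced disable calls
}

static void enable_interrupts( void ) {
        unsigned short prev = atomic_fetch_sub( &disable_preempt_count, 1 );
        assert( prev != 0 );                           // enabling while already enabled is a bug
        if ( prev == 1 && pending_preemption ) {       // counter just hit zero: honour a missed preemption
                pending_preemption = false;
                do_deferred_yield();
        }
}

static void enable_interrupts_noPoll( void ) {         // decrement only; never yield from here
        unsigned short prev = atomic_fetch_sub( &disable_preempt_count, 1 );
        assert( prev != 0 );
}
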
  • src/libcfa/concurrency/preemption.c

    r5bd0aad → r969b3fe

      #endif

    + //TODO move to defaults
      #define __CFA_DEFAULT_PREEMPTION__ 10000

    + //TODO move to defaults
      __attribute__((weak)) unsigned int default_preemption() {
              return __CFA_DEFAULT_PREEMPTION__;
      }

    + // Short hands for signal context information
      #define __CFA_SIGCXT__ ucontext_t *
      #define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt

    + // FwdDeclarations : timeout handlers
      static void preempt( processor   * this );
      static void timeout( thread_desc * this );

    + // FwdDeclarations : Signal handlers
      void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
    - void sigHandler_alarm    ( __CFA_SIGPARMS__ );
      void sigHandler_segv     ( __CFA_SIGPARMS__ );
      void sigHandler_abort    ( __CFA_SIGPARMS__ );

    + // FwdDeclarations : sigaction wrapper
      static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
    - LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )
    -
    +
    + // FwdDeclarations : alarm thread main
    + void * alarm_loop( __attribute__((unused)) void * args );
    +
    + // Machine specific register name
      #ifdef __x86_64__
      #define CFA_REG_IP REG_RIP
    …
      #endif

    + KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
    + event_kernel_t * event_kernel;                        // kernel public handle to even kernel
    + static pthread_t alarm_thread;                        // pthread handle to alarm thread
    +
    + void ?{}(event_kernel_t * this) {
    +         (&this->alarms){};
    +         (&this->lock){};
    + }

      //=============================================================================================
    …
      //=============================================================================================

    + // Get next expired node
    + static inline alarm_node_t * get_expired( alarm_list_t * alarms, __cfa_time_t currtime ) {
    +         if( !alarms->head ) return NULL;                          // If no alarms return null
    +         if( alarms->head->alarm >= currtime ) return NULL;        // If alarms head not expired return null
    +         return pop(alarms);                                       // Otherwise just pop head
    + }
    +
    + // Tick one frame of the Discrete Event Simulation for alarms
      void tick_preemption() {
    -         alarm_list_t * alarms = &event_kernel->alarms;
    -         __cfa_time_t currtime = __kernel_get_time();
    -
    -         // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption @ %llu\n", currtime );
    -         while( alarms->head && alarms->head->alarm < currtime ) {
    -                 alarm_node_t * node = pop(alarms);
    -                 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
    -
    +         alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
    +         alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
    +         __cfa_time_t currtime = __kernel_get_time();    // Check current time once so we everything "happens at once"
    +
    +         //Loop throught every thing expired
    +         while( node = get_expired( alarms, currtime ) ) {
    +
    +                 // Check if this is a kernel
                      if( node->kernel_alarm ) {
                              preempt( node->proc );
    …
                      }

    -                 verify( validate( alarms ) );
    -
    +                 // Check if this is a periodic alarm
                      __cfa_time_t period = node->period;
                      if( period > 0 ) {
    -                         node->alarm = currtime + period;
    -                         // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Reinsert %p @ %llu (%llu + %llu)\n", node, node->alarm, currtime, period );
    -                         insert( alarms, node );
    +                         node->alarm = currtime + period;    // Alarm is periodic, add currtime to it (used cached current time)
    +                         insert( alarms, node );             // Reinsert the node for the next time it triggers
                      }
                      else {
    -                         node->set = false;
    -                 }
    -         }
    -
    -         if( alarms->head ) {
    -                 __kernel_set_timer( alarms->head->alarm - currtime );
    -         }
    -
    -         verify( validate( alarms ) );
    -         // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
    - }
    -
    +                         node->set = false;                  // Node is one-shot, just mark it as not pending
    +                 }
    +         }
    +
    +         // If there are still alarms pending, reset the timer
    +         if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); }
    + }
    +
    + // Update the preemption of a processor and notify interested parties
      void update_preemption( processor * this, __cfa_time_t duration ) {
    -         LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %llu\n", this, duration );
    -
              alarm_node_t * alarm = this->preemption_alarm;
    -         duration *= 1000;

              // Alarms need to be enabled
    …

      extern "C" {
    +         // Disable interrupts by incrementing the counter
              void disable_interrupts() {
                      __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
    -                 verify( new_val < (unsigned short)65_000 );
    -                 verify( new_val != (unsigned short) 0 );
    -         }
    -
    -         void enable_interrupts_noRF() {
    -                 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    -                 verify( prev != (unsigned short) 0 );
    -         }
    -
    +                 verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
    +         }
    +
    +         // Enable interrupts by decrementing the counter
    +         // If counter reaches 0, execute any pending CtxSwitch
              void enable_interrupts( DEBUG_CTX_PARAM ) {
    -                 processor * proc   = this_processor;
    -                 thread_desc * thrd = this_thread;
    +                 processor * proc   = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
    +                 thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
    +
                      unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    -                 verify( prev != (unsigned short) 0 );
    +                 verify( prev != 0u );                     // If this triggers someone is enabled already enabled interruptsverify( prev != 0u );
    +
    +                 // Check if we need to prempt the thread because an interrupt was missed
                      if( prev == 1 && proc->pending_preemption ) {
                              proc->pending_preemption = false;
    …
                      }

    +                 // For debugging purposes : keep track of the last person to enable the interrupts
                      LIB_DEBUG_DO( proc->last_enable = caller; )
              }
    - }
    -
    +
    +         // Disable interrupts by incrementint the counter
    +         // Don't execute any pending CtxSwitch even if counter reaches 0
    +         void enable_interrupts_noPoll() {
    +                 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    +                 verify( prev != 0u );                     // If this triggers someone is enabled already enabled interrupts
    +         }
    + }
    +
    + // sigprocmask wrapper : unblock a single signal
      static inline void signal_unblock( int sig ) {
              sigset_t mask;
    …
      }

    + // sigprocmask wrapper : block a single signal
      static inline void signal_block( int sig ) {
              sigset_t mask;
    …
      }

    - static inline bool preemption_ready() {
    -         return disable_preempt_count == 0 && !preemption_in_progress;
    - }
    -
    - static inline void defer_ctxSwitch() {
    -         this_processor->pending_preemption = true;
    - }
    -
    + // kill wrapper : signal a processor
      static void preempt( processor * this ) {
              pthread_kill( this->kernel_thread, SIGUSR1 );
      }

    + // reserved for future use
      static void timeout( thread_desc * this ) {
              //TODO : implement waking threads
      }

    +
    + // Check if a CtxSwitch signal handler shoud defer
    + // If true  : preemption is safe
    + // If false : preemption is unsafe and marked as pending
    + static inline bool preemption_ready() {
    +         bool ready = disable_preempt_count == 0 && !preemption_in_progress; // Check if preemption is safe
    +         this_processor->pending_preemption = !ready;                        // Adjust the pending flag accordingly
    +         return ready;
    + }
    +
      //=============================================================================================
      // Kernel Signal Startup/Shutdown logic
      //=============================================================================================

    - static pthread_t alarm_thread;
    - void * alarm_loop( __attribute__((unused)) void * args );
    -
    + // Startup routine to activate preemption
    + // Called from kernel_startup
      void kernel_start_preemption() {
              LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
    -         __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
    -         // __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );
    -         // __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );
    +
    +         // Start with preemption disabled until ready
    +         disable_preempt_count = 1;
    +
    +         // Initialize the event kernel
    +         event_kernel = (event_kernel_t *)&storage_event_kernel;
    +         event_kernel{};
    +
    +         // Setup proper signal handlers
    +         __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );         // CtxSwitch handler
    +         // __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );      // Failure handler
    +         // __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );      // Failure handler

              signal_block( SIGALRM );
    …
      }

    + // Shutdown routine to deactivate preemption
    + // Called from kernel_shutdown
      void kernel_stop_preemption() {
              LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n");

    +         // Block all signals since we are already shutting down
              sigset_t mask;
              sigfillset( &mask );
              sigprocmask( SIG_BLOCK, &mask, NULL );

    +         // Notify the alarm thread of the shutdown
              sigval val = { 1 };
              pthread_sigqueue( alarm_thread, SIGALRM, val );
    +
    +         // Wait for the preemption thread to finish
              pthread_join( alarm_thread, NULL );
    +
    +         // Preemption is now fully stopped
    +
              LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
      }

    + // Raii ctor/dtor for the preemption_scope
    + // Used by thread to control when they want to receive preemption signals
      void ?{}( preemption_scope * this, processor * proc ) {
    -         (&this->alarm){ proc };
    +         (&this->alarm){ proc, zero_time, zero_time };
              this->proc = proc;
              this->proc->preemption_alarm = &this->alarm;
    -         update_preemption( this->proc, this->proc->cltr->preemption );
    +
    +         update_preemption( this->proc, from_us(this->proc->cltr->preemption) );
      }

    …
              disable_interrupts();

    -         update_preemption( this->proc, 0 );
    +         update_preemption( this->proc, zero_time );
      }

    …
      //=============================================================================================

    + // Context switch signal handler
    + // Receives SIGUSR1 signal and causes the current thread to yield
      void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
              LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )
    -         if( preemption_ready() ) {
    -                 preemption_in_progress = true;
    -                 signal_unblock( SIGUSR1 );
    -                 this_processor->pending_preemption = false;
    -                 preemption_in_progress = false;
    -                 BlockInternal( (thread_desc*)this_thread );
    -         }
    -         else {
    -                 defer_ctxSwitch();
    -         }
    - }
    -
    +
    +         // Check if it is safe to preempt here
    +         if( !preemption_ready() ) { return; }
    +
    +         preemption_in_progress = true;                      // Sync flag : prevent recursive calls to the signal handler
    +         signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
    +         preemption_in_progress = false;                     // Clear the in progress flag
    +
    +         // Preemption can occur here
    +
    +         BlockInternal( (thread_desc*)this_thread );         // Do the actual CtxSwitch
    + }
    +
    + // Main of the alarm thread
    + // Waits on SIGALRM and send SIGUSR1 to whom ever needs it
      void * alarm_loop( __attribute__((unused)) void * args ) {
    +         // Block sigalrms to control when they arrive
              sigset_t mask;
              sigemptyset( &mask );
    …
              }

    +         // Main loop
              while( true ) {
    +                 // Wait for a sigalrm
                      siginfo_t info;
                      int sig = sigwaitinfo( &mask, &info );
    +
    +                 // If another signal arrived something went wrong
                      assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);

                      LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
    +                 // Switch on the code (a.k.a. the sender) to
                      switch( info.si_code )
                      {
    +                 // Timers can apparently be marked as sent for the kernel
    +                 // In either case, tick preemption
                      case SI_TIMER:
                      case SI_KERNEL:
    …
                              unlock( &event_kernel->lock );
                              break;
    +                 // Signal was not sent by the kernel but by an other thread
                      case SI_QUEUE:
    +                         // For now, other thread only signal the alarm thread to shut it down
    +                         // If this needs to change use info.si_value and handle the case here
                              goto EXIT;
                      }
    …
      }

    + // Sigaction wrapper : register an signal handler
      static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
              struct sigaction act;
    …
      }

    - typedef void (*sa_handler_t)(int);
    -
    + // Sigaction wrapper : restore default handler
      static void __kernel_sigdefault( int sig ) {
              struct sigaction act;

    -         // act.sa_handler = SIG_DFL;
    +         act.sa_handler = SIG_DFL;
              act.sa_flags = 0;
              sigemptyset( &act.sa_mask );
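
The largest change here is the restructured tick_preemption: instead of an inline while loop interleaved with debug prints and validate calls, expired nodes are popped through the new get_expired helper, periodic alarms are re-queued at currtime + period, one-shot alarms are marked no longer set, and the hardware timer is re-armed for whatever is now at the head of the list. A plain-C sketch of that loop over a sorted alarm list (every type and helper below is a stand-in for its CFA counterpart, not the changeset code itself):

// Plain-C sketch of the restructured tick; alarm_node_t, pop/insert, get_time
// and set_timer are hypothetical stand-ins for the CFA kernel services.
#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <time.h>

typedef uint64_t time_ns_t;

typedef struct alarm_node {
        struct alarm_node * next;
        time_ns_t alarm;                  // absolute expiry time
        time_ns_t period;                 // 0 means one-shot
        bool set;                         // node currently queued
} alarm_node_t;

typedef struct { alarm_node_t * head; } alarm_list_t;     // kept sorted by expiry

static alarm_node_t * pop( alarm_list_t * l ) {
        alarm_node_t * n = l->head; l->head = n->next; n->next = NULL; return n;
}
static void insert( alarm_list_t * l, alarm_node_t * n ) {         // sorted insert
        alarm_node_t ** it = &l->head;
        while ( *it && (*it)->alarm <= n->alarm ) it = &(*it)->next;
        n->next = *it; *it = n;
}
static void fire( alarm_node_t * n ) { (void)n; }                  // preempt()/timeout() in the real code
static void set_timer( time_ns_t delta ) { (void)delta; }          // __kernel_set_timer() in the real code
static time_ns_t get_time( void ) {                                // __kernel_get_time() in the real code
        struct timespec ts;
        clock_gettime( CLOCK_REALTIME, &ts );
        return (time_ns_t)ts.tv_sec * 1000000000ull + (time_ns_t)ts.tv_nsec;
}

// Same shape as the new get_expired(): return a node only if the head exists and has expired.
static alarm_node_t * get_expired( alarm_list_t * alarms, time_ns_t currtime ) {
        if ( !alarms->head ) return NULL;
        if ( alarms->head->alarm >= currtime ) return NULL;
        return pop( alarms );
}

static void tick_preemption( alarm_list_t * alarms ) {
        time_ns_t currtime = get_time();                 // read the clock once for the whole tick
        alarm_node_t * node;
        while ( (node = get_expired( alarms, currtime )) ) {
                fire( node );                            // kernel alarm -> preempt, user alarm -> timeout
                if ( node->period > 0 ) {
                        node->alarm = currtime + node->period;   // periodic: schedule the next occurrence
                        insert( alarms, node );
                } else {
                        node->set = false;               // one-shot: no longer pending
                }
        }
        if ( alarms->head )
                set_timer( alarms->head->alarm - currtime );     // re-arm for the new earliest alarm
}

Reading the clock once per tick, as the comment in the diff notes, makes every alarm processed in that tick observe the same "now", so periodic alarms are re-queued relative to a single consistent reference time.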