Ignore:
Timestamp:
Jul 19, 2017, 11:49:33 AM (8 years ago)
Author:
Aaron Moss <a3moss@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children:
9cc0472
Parents:
fea3faa (diff), a57cb58 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
src/libcfa/concurrency
Files:
16 edited

Legend:

Unmodified
Added
Removed
  • src/libcfa/concurrency/CtxSwitch-i386.S

    rfea3faa rb826e6b  
    9898        ret
    9999
    100 .text
    101         .align 2
    102 .globl  CtxGet
    103 CtxGet:
    104         movl %esp,SP_OFFSET(%eax)
    105         movl %ebp,FP_OFFSET(%eax)
    106 
    107         ret
    108 
    109100// Local Variables: //
    110101// compile-command: "make install" //
  • src/libcfa/concurrency/CtxSwitch-x86_64.S

    rfea3faa rb826e6b  
    1 //                               -*- Mode: Asm -*- 
     1//                               -*- Mode: Asm -*-
    22//
    33// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
     
    1818// Free Software  Foundation; either  version 2.1 of  the License, or  (at your
    1919// option) any later version.
    20 // 
     20//
    2121// This library is distributed in the  hope that it will be useful, but WITHOUT
    2222// ANY  WARRANTY;  without even  the  implied  warranty  of MERCHANTABILITY  or
    2323// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License
    2424// for more details.
    25 // 
     25//
    2626// You should  have received a  copy of the  GNU Lesser General  Public License
    2727// along  with this library.
    28 // 
     28//
    2929
    3030// This context switch routine depends on the fact that the stack of a new
     
    9393.globl  CtxInvokeStub
    9494CtxInvokeStub:
    95         movq %rbx, %rdi 
     95        movq %rbx, %rdi
    9696        jmp *%r12
    97 
    98 .text
    99         .align 2
    100 .globl  CtxGet
    101 CtxGet:
    102         movq %rsp,SP_OFFSET(%rdi)
    103         movq %rbp,FP_OFFSET(%rdi)
    104 
    105         ret
    10697
    10798// Local Variables: //
  • src/libcfa/concurrency/alarm.c

    rfea3faa rb826e6b  
    1616
    1717extern "C" {
     18#include <errno.h>
     19#include <stdio.h>
     20#include <string.h>
    1821#include <time.h>
     22#include <unistd.h>
    1923#include <sys/time.h>
    2024}
     25
     26#include "libhdr.h"
    2127
    2228#include "alarm.h"
     
    2531
    2632//=============================================================================================
     33// time type
     34//=============================================================================================
     35
     36#define one_second         1_000_000_000ul
     37#define one_milisecond         1_000_000ul
     38#define one_microsecond            1_000ul
     39#define one_nanosecond                 1ul
     40
     41__cfa_time_t zero_time = { 0 };
     42
     43void ?{}( __cfa_time_t * this ) { this->val = 0; }
     44void ?{}( __cfa_time_t * this, zero_t zero ) { this->val = 0; }
     45
     46void ?{}( itimerval * this, __cfa_time_t * alarm ) {
     47        this->it_value.tv_sec = alarm->val / one_second;                        // seconds
     48        this->it_value.tv_usec = max( (alarm->val % one_second) / one_microsecond, 1000 ); // microseconds
     49        this->it_interval.tv_sec = 0;
     50        this->it_interval.tv_usec = 0;
     51}
     52
     53
     54void ?{}( __cfa_time_t * this, timespec * curr ) {
     55        uint64_t secs  = curr->tv_sec;
     56        uint64_t nsecs = curr->tv_nsec;
     57        this->val = (secs * one_second) + nsecs;
     58}
     59
     60__cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs ) {
     61        this->val = 0;
     62        return *this;
     63}
     64
     65__cfa_time_t from_s ( uint64_t val ) { __cfa_time_t ret; ret.val = val * 1_000_000_000ul; return ret; }
     66__cfa_time_t from_ms( uint64_t val ) { __cfa_time_t ret; ret.val = val *     1_000_000ul; return ret; }
     67__cfa_time_t from_us( uint64_t val ) { __cfa_time_t ret; ret.val = val *         1_000ul; return ret; }
     68__cfa_time_t from_ns( uint64_t val ) { __cfa_time_t ret; ret.val = val *             1ul; return ret; }
     69
     70//=============================================================================================
    2771// Clock logic
    2872//=============================================================================================
     
    3175        timespec curr;
    3276        clock_gettime( CLOCK_REALTIME, &curr );
    33         return ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
     77        return (__cfa_time_t){ &curr };
    3478}
    3579
    3680void __kernel_set_timer( __cfa_time_t alarm ) {
    37         itimerval val;
    38         val.it_value.tv_sec = alarm / TIMEGRAN;                 // seconds
    39         val.it_value.tv_usec = (alarm % TIMEGRAN) / ( TIMEGRAN / 1_000_000L ); // microseconds
    40         val.it_interval.tv_sec = 0;
    41         val.it_interval.tv_usec = 0;
     81        itimerval val = { &alarm };
    4282        setitimer( ITIMER_REAL, &val, NULL );
    4383}
     
    4787//=============================================================================================
    4888
    49 void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
     89void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
    5090        this->thrd = thrd;
    5191        this->alarm = alarm;
     
    5696}
    5797
    58 void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 ) {
     98void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time ) {
    5999        this->proc = proc;
    60100        this->alarm = alarm;
     
    71111}
    72112
     113LIB_DEBUG_DO( bool validate( alarm_list_t * this ) {
     114        alarm_node_t ** it = &this->head;
     115        while( (*it) ) {
     116                it = &(*it)->next;
     117        }
     118
     119        return it == this->tail;
     120})
     121
    73122static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) {
    74         assert( !n->next );
     123        verify( !n->next );
    75124        if( p == this->tail ) {
    76125                this->tail = &n->next;
     
    80129        }
    81130        *p = n;
     131
     132        verify( validate( this ) );
    82133}
    83134
     
    89140
    90141        insert_at( this, n, it );
     142
     143        verify( validate( this ) );
    91144}
    92145
     
    100153                head->next = NULL;
    101154        }
     155        verify( validate( this ) );
    102156        return head;
    103157}
     
    105159static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) {
    106160        verify( it );
    107         verify( (*it)->next == n );
    108 
    109         (*it)->next = n->next;
     161        verify( (*it) == n );
     162
     163        (*it) = n->next;
    110164        if( !n-> next ) {
    111165                this->tail = it;
    112166        }
    113167        n->next = NULL;
     168
     169        verify( validate( this ) );
    114170}
    115171
    116172static inline void remove( alarm_list_t * this, alarm_node_t * n ) {
    117173        alarm_node_t ** it = &this->head;
    118         while( (*it) && (*it)->next != n ) {
     174        while( (*it) && (*it) != n ) {
    119175                it = &(*it)->next;
    120176        }
    121177
     178        verify( validate( this ) );
     179
    122180        if( *it ) { remove_at( this, n, it ); }
     181
     182        verify( validate( this ) );
    123183}
    124184
    125185void register_self( alarm_node_t * this ) {
     186        alarm_list_t * alarms = &event_kernel->alarms;
     187
    126188        disable_interrupts();
    127         assert( !systemProcessor->pending_alarm );
    128         lock( &systemProcessor->alarm_lock );
     189        lock( &event_kernel->lock DEBUG_CTX2 );
    129190        {
    130                 insert( &systemProcessor->alarms, this );
    131                 if( systemProcessor->pending_alarm ) {
    132                         tick_preemption();
     191                verify( validate( alarms ) );
     192                bool first = !alarms->head;
     193
     194                insert( alarms, this );
     195                if( first ) {
     196                        __kernel_set_timer( alarms->head->alarm - __kernel_get_time() );
    133197                }
    134198        }
    135         unlock( &systemProcessor->alarm_lock );
     199        unlock( &event_kernel->lock );
    136200        this->set = true;
    137         enable_interrupts();
     201        enable_interrupts( DEBUG_CTX );
    138202}
    139203
    140204void unregister_self( alarm_node_t * this ) {
    141205        disable_interrupts();
    142         lock( &systemProcessor->alarm_lock );
    143         remove( &systemProcessor->alarms, this );
    144         unlock( &systemProcessor->alarm_lock );
    145         disable_interrupts();
     206        lock( &event_kernel->lock DEBUG_CTX2 );
     207        {
     208                verify( validate( &event_kernel->alarms ) );
     209                remove( &event_kernel->alarms, this );
     210        }
     211        unlock( &event_kernel->lock );
     212        enable_interrupts( DEBUG_CTX );
    146213        this->set = false;
    147214}
  • src/libcfa/concurrency/alarm.h

    rfea3faa rb826e6b  
    1919
    2020#include <stdbool.h>
     21#include <stdint.h>
    2122
    2223#include "assert"
    23 
    24 typedef unsigned long int __cfa_time_t;
    2524
    2625struct thread_desc;
    2726struct processor;
    2827
     28struct timespec;
     29struct itimerval;
     30
     31//=============================================================================================
     32// time type
     33//=============================================================================================
     34
     35struct __cfa_time_t {
     36        uint64_t val;
     37};
     38
     39// ctors
     40void ?{}( __cfa_time_t * this );
     41void ?{}( __cfa_time_t * this, zero_t zero );
     42void ?{}( __cfa_time_t * this, timespec * curr );
     43void ?{}( itimerval * this, __cfa_time_t * alarm );
     44
     45__cfa_time_t ?=?( __cfa_time_t * this, zero_t rhs );
     46
     47// logical ops
     48static inline bool ?==?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val == rhs.val; }
     49static inline bool ?!=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val != rhs.val; }
     50static inline bool ?>? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >  rhs.val; }
     51static inline bool ?<? ( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <  rhs.val; }
     52static inline bool ?>=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val >= rhs.val; }
     53static inline bool ?<=?( __cfa_time_t lhs, __cfa_time_t rhs ) { return lhs.val <= rhs.val; }
     54
     55static inline bool ?==?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val == rhs; }
     56static inline bool ?!=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val != rhs; }
     57static inline bool ?>? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >  rhs; }
     58static inline bool ?<? ( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <  rhs; }
     59static inline bool ?>=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val >= rhs; }
     60static inline bool ?<=?( __cfa_time_t lhs, zero_t rhs ) { return lhs.val <= rhs; }
     61
     62// addition/subtraction
     63static inline __cfa_time_t ?+?( __cfa_time_t lhs, __cfa_time_t rhs ) {
     64        __cfa_time_t ret;
     65        ret.val = lhs.val + rhs.val;
     66        return ret;
     67}
     68
     69static inline __cfa_time_t ?-?( __cfa_time_t lhs, __cfa_time_t rhs ) {
     70        __cfa_time_t ret;
     71        ret.val = lhs.val - rhs.val;
     72        return ret;
     73}
     74
     75__cfa_time_t from_s ( uint64_t );
     76__cfa_time_t from_ms( uint64_t );
     77__cfa_time_t from_us( uint64_t );
     78__cfa_time_t from_ns( uint64_t );
     79
     80extern __cfa_time_t zero_time;
     81
    2982//=============================================================================================
    3083// Clock logic
    3184//=============================================================================================
    32 
    33 #define TIMEGRAN 1_000_000_000L                         // nanosecond granularity, except for timeval
    3485
    3586__cfa_time_t __kernel_get_time();
     
    56107typedef alarm_node_t ** __alarm_it_t;
    57108
    58 void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
    59 void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = 0, __cfa_time_t period = 0 );
     109void ?{}( alarm_node_t * this, thread_desc * thrd, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
     110void ?{}( alarm_node_t * this, processor   * proc, __cfa_time_t alarm = zero_time, __cfa_time_t period = zero_time );
    60111void ^?{}( alarm_node_t * this );
    61112
  • src/libcfa/concurrency/coroutine

    rfea3faa rb826e6b  
    6363
    6464// Get current coroutine
    65 coroutine_desc * this_coroutine(void);
     65extern volatile thread_local coroutine_desc * this_coroutine;
    6666
    6767// Private wrappers for context switch and stack creation
     
    7171// Suspend implementation inlined for performance
    7272static inline void suspend() {
    73         coroutine_desc * src = this_coroutine();                // optimization
     73        coroutine_desc * src = this_coroutine;          // optimization
    7474
    7575        assertf( src->last != 0,
     
    8888forall(dtype T | is_coroutine(T))
    8989static inline void resume(T * cor) {
    90         coroutine_desc * src = this_coroutine();                // optimization
     90        coroutine_desc * src = this_coroutine;          // optimization
    9191        coroutine_desc * dst = get_coroutine(cor);
    9292
     
    112112
    113113static inline void resume(coroutine_desc * dst) {
    114         coroutine_desc * src = this_coroutine();                // optimization
     114        coroutine_desc * src = this_coroutine;          // optimization
    115115
    116116        // not resuming self ?
  • src/libcfa/concurrency/coroutine.c

    rfea3faa rb826e6b  
    3232#include "invoke.h"
    3333
    34 extern thread_local processor * this_processor;
     34extern volatile thread_local processor * this_processor;
    3535
    3636//-----------------------------------------------------------------------------
     
    4444// Coroutine ctors and dtors
    4545void ?{}(coStack_t* this) {
    46         this->size              = 10240;        // size of stack
     46        this->size              = 65000;        // size of stack
    4747        this->storage   = NULL; // pointer to stack
    4848        this->limit             = NULL; // stack grows towards stack limit
     
    5050        this->context   = NULL; // address of cfa_context_t
    5151        this->top               = NULL; // address of top of storage
    52         this->userStack = false;       
     52        this->userStack = false;
    5353}
    5454
     
    106106
    107107        // set state of current coroutine to inactive
    108         src->state = Inactive;
     108        src->state = src->state == Halted ? Halted : Inactive;
    109109
    110110        // set new coroutine that task is executing
    111         this_processor->current_coroutine = dst;
     111        this_coroutine = dst;
    112112
    113113        // context switch to specified coroutine
     114        assert( src->stack.context );
    114115        CtxSwitch( src->stack.context, dst->stack.context );
    115         // when CtxSwitch returns we are back in the src coroutine             
     116        // when CtxSwitch returns we are back in the src coroutine
    116117
    117118        // set state of new coroutine to active
     
    131132                this->size = libCeiling( storageSize, 16 );
    132133                // use malloc/memalign because "new" raises an exception for out-of-memory
    133                
     134
    134135                // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
    135136                LIB_DEBUG_DO( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) );
  • src/libcfa/concurrency/invoke.c

    rfea3faa rb826e6b  
    2929
    3030extern void __suspend_internal(void);
    31 extern void __leave_monitor_desc( struct monitor_desc * this );
     31extern void __leave_thread_monitor( struct thread_desc * this );
     32extern void disable_interrupts();
     33extern void enable_interrupts( DEBUG_CTX_PARAM );
    3234
    3335void CtxInvokeCoroutine(
    34       void (*main)(void *), 
    35       struct coroutine_desc *(*get_coroutine)(void *), 
     36      void (*main)(void *),
     37      struct coroutine_desc *(*get_coroutine)(void *),
    3638      void *this
    3739) {
     
    5658
    5759void CtxInvokeThread(
    58       void (*dtor)(void *), 
    59       void (*main)(void *), 
    60       struct thread_desc *(*get_thread)(void *), 
     60      void (*dtor)(void *),
     61      void (*main)(void *),
     62      struct thread_desc *(*get_thread)(void *),
    6163      void *this
    6264) {
     65      // First suspend, once the thread arrives here,
     66      // the function pointer to main can be invalidated without risk
    6367      __suspend_internal();
    6468
     69      // Fetch the thread handle from the user defined thread structure
    6570      struct thread_desc* thrd = get_thread( this );
    66       struct coroutine_desc* cor = &thrd->cor;
    67       struct monitor_desc* mon = &thrd->mon;
    68       cor->state = Active;
    6971
    70       // LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this);
     72      // Officially start the thread by enabling preemption
     73      enable_interrupts( DEBUG_CTX );
     74
     75      // Call the main of the thread
    7176      main( this );
    7277
    73       __leave_monitor_desc( mon );
    74 
     78      // To exit a thread we must :
     79      // 1 - Mark it as halted
     80      // 2 - Leave its monitor
     81      // 3 - Disable the interrupts
     82      // 4 - Final suspend
     83      // The order of these 4 operations is very important
    7584      //Final suspend, should never return
    76       __suspend_internal();
     85      __leave_thread_monitor( thrd );
    7786      abortf("Resumed dead thread");
    7887}
     
    8089
    8190void CtxStart(
    82       void (*main)(void *), 
    83       struct coroutine_desc *(*get_coroutine)(void *), 
    84       void *this, 
     91      void (*main)(void *),
     92      struct coroutine_desc *(*get_coroutine)(void *),
     93      void *this,
    8594      void (*invoke)(void *)
    8695) {
     
    108117        ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
    109118      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    110       ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
     119      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    111120
    112121#elif defined( __x86_64__ )
     
    128137      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
    129138      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
    130       ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
     139      ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
    131140#else
    132141      #error Only __i386__ and __x86_64__ is supported for threads in cfa
  • src/libcfa/concurrency/invoke.h

    rfea3faa rb826e6b  
    3131      struct spinlock {
    3232            volatile int lock;
     33            #ifdef __CFA_DEBUG__
     34                  const char * prev_name;
     35                  void* prev_thrd;
     36            #endif
    3337      };
    3438
     
    8387            struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
    8488            struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
    85             struct monitor_desc * stack_owner;        // if bulk acquiring was used we need to synchronize signals with an other monitor
    8689            unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
    8790      };
     
    99102#ifndef _INVOKE_PRIVATE_H_
    100103#define _INVOKE_PRIVATE_H_
    101      
     104
    102105      struct machine_context_t {
    103106            void *SP;
     
    109112      extern void CtxInvokeStub( void );
    110113      void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
    111       void CtxGet( void * this ) asm ("CtxGet");
     114
     115      #if   defined( __x86_64__ )
     116      #define CtxGet( ctx ) __asm__ ( \
     117                  "movq %%rsp,%0\n"   \
     118                  "movq %%rbp,%1\n"   \
     119            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     120      #elif defined( __i386__ )
     121      #define CtxGet( ctx ) __asm__ ( \
     122                  "movl %%esp,%0\n"   \
     123                  "movl %%ebp,%1\n"   \
     124            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     125      #endif
    112126
    113127#endif //_INVOKE_PRIVATE_H_
  • src/libcfa/concurrency/kernel

    rfea3faa rb826e6b  
    2828//-----------------------------------------------------------------------------
    2929// Locks
    30 bool try_lock( spinlock * );
    31 void lock( spinlock * );
    32 void unlock( spinlock * );
     30void lock      ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, spin if already acquired
     31void lock_yield( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, yield repeatedly if already acquired
     32bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, return false if already acquired
     33void unlock    ( spinlock * );                        // Unlock the spinlock
    3334
    34 struct signal_once {
    35         volatile bool cond;
    36         struct spinlock lock;
    37         struct __thread_queue_t blocked;
     35struct semaphore {
     36        spinlock lock;
     37        int count;
     38        __thread_queue_t waiting;
    3839};
    3940
    40 void ?{}(signal_once * this);
    41 void ^?{}(signal_once * this);
     41void  ?{}(semaphore * this, int count = 1);
     42void ^?{}(semaphore * this);
     43void P(semaphore * this);
     44void V(semaphore * this);
    4245
    43 void wait( signal_once * );
    44 void signal( signal_once * );
    4546
    4647//-----------------------------------------------------------------------------
    4748// Cluster
    4849struct cluster {
    49         __thread_queue_t ready_queue;
    50         spinlock lock;
     50        spinlock ready_queue_lock;                      // Ready queue locks
     51        __thread_queue_t ready_queue;                   // Ready queue for threads
     52        unsigned long long int preemption;              // Preemption rate on this cluster
    5153};
    5254
     
    6870        unsigned short thrd_count;
    6971};
    70 static inline void ?{}(FinishAction * this) { 
     72static inline void ?{}(FinishAction * this) {
    7173        this->action_code = No_Action;
    7274        this->thrd = NULL;
     
    7577static inline void ^?{}(FinishAction * this) {}
    7678
     79// Processor
     80// Wrapper around kernel threads
    7781struct processor {
    78         struct processorCtx_t * runner;
    79         cluster * cltr;
    80         coroutine_desc * current_coroutine;
    81         thread_desc * current_thread;
    82         pthread_t kernel_thread;
    83        
    84         signal_once terminated;
    85         volatile bool is_terminated;
     82        // Main state
     83        struct processorCtx_t * runner;                 // Coroutine ctx that keeps the state of the processor
     84        cluster * cltr;                                 // Cluster from which to get threads
     85        pthread_t kernel_thread;                        // Handle to pthreads
    8686
    87         struct FinishAction finish;
     87        // Termination
     88        volatile bool do_terminate;                     // Set to true to notify the processor should terminate
     89        semaphore terminated;                           // Termination synchronisation
    8890
    89         struct alarm_node_t * preemption_alarm;
    90         unsigned int preemption;
     91        // RunThread data
     92        struct FinishAction finish;                     // Action to do after a thread is ran
    9193
    92         unsigned short disable_preempt_count;
     94        // Preemption data
     95        struct alarm_node_t * preemption_alarm;         // Node which is added in the discrete event simulation
     96        bool pending_preemption;                        // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
    9397
    94         bool pending_preemption;
     98#ifdef __CFA_DEBUG__
     99        char * last_enable;                             // Last function to enable preemption on this processor
     100#endif
    95101};
    96102
  • src/libcfa/concurrency/kernel.c

    rfea3faa rb826e6b  
    1515//
    1616
    17 #include "startup.h"
    18 
    19 //Start and stop routine for the kernel, declared first to make sure they run first
    20 void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
    21 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
    22 
    23 //Header
    24 #include "kernel_private.h"
     17#include "libhdr.h"
    2518
    2619//C Includes
     
    3528
    3629//CFA Includes
    37 #include "libhdr.h"
     30#include "kernel_private.h"
    3831#include "preemption.h"
     32#include "startup.h"
    3933
    4034//Private includes
     
    4236#include "invoke.h"
    4337
     38//Start and stop routine for the kernel, declared first to make sure they run first
     39void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
     40void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
     41
    4442//-----------------------------------------------------------------------------
    4543// Kernel storage
    46 #define KERNEL_STORAGE(T,X) static char X##_storage[sizeof(T)]
    47 
    48 KERNEL_STORAGE(processorCtx_t, systemProcessorCtx);
    49 KERNEL_STORAGE(cluster, systemCluster);
    50 KERNEL_STORAGE(system_proc_t, systemProcessor);
    51 KERNEL_STORAGE(thread_desc, mainThread);
    52 KERNEL_STORAGE(machine_context_t, mainThread_context);
    53 
    54 cluster * systemCluster;
    55 system_proc_t * systemProcessor;
     44KERNEL_STORAGE(cluster,           mainCluster);
     45KERNEL_STORAGE(processor,         mainProcessor);
     46KERNEL_STORAGE(processorCtx_t,    mainProcessorCtx);
     47KERNEL_STORAGE(thread_desc,       mainThread);
     48KERNEL_STORAGE(machine_context_t, mainThreadCtx);
     49
     50cluster *     mainCluster;
     51processor *   mainProcessor;
    5652thread_desc * mainThread;
    5753
     
    5955// Global state
    6056
    61 thread_local processor * this_processor;
    62 
    63 coroutine_desc * this_coroutine(void) {
    64         return this_processor->current_coroutine;
    65 }
    66 
    67 thread_desc * this_thread(void) {
    68         return this_processor->current_thread;
    69 }
     57volatile thread_local coroutine_desc * this_coroutine;
     58volatile thread_local thread_desc * this_thread;
     59volatile thread_local processor * this_processor;
     60
     61volatile thread_local bool preemption_in_progress = 0;
     62volatile thread_local unsigned short disable_preempt_count = 1;
    7063
    7164//-----------------------------------------------------------------------------
    7265// Main thread construction
    7366struct current_stack_info_t {
    74         machine_context_t ctx; 
     67        machine_context_t ctx;
    7568        unsigned int size;              // size of stack
    7669        void *base;                             // base of stack
     
    8275
    8376void ?{}( current_stack_info_t * this ) {
    84         CtxGet( &this->ctx );
     77        CtxGet( this->ctx );
    8578        this->base = this->ctx.FP;
    8679        this->storage = this->ctx.SP;
     
    9184
    9285        this->limit = (void *)(((intptr_t)this->base) - this->size);
    93         this->context = &mainThread_context_storage;
     86        this->context = &storage_mainThreadCtx;
    9487        this->top = this->base;
    9588}
     
    10699
    107100void ?{}( coroutine_desc * this, current_stack_info_t * info) {
    108         (&this->stack){ info }; 
     101        (&this->stack){ info };
    109102        this->name = "Main Thread";
    110103        this->errno_ = 0;
     
    131124
    132125void ?{}(processor * this) {
    133         this{ systemCluster };
     126        this{ mainCluster };
    134127}
    135128
    136129void ?{}(processor * this, cluster * cltr) {
    137130        this->cltr = cltr;
    138         this->current_coroutine = NULL;
    139         this->current_thread = NULL;
    140         (&this->terminated){};
    141         this->is_terminated = false;
     131        (&this->terminated){ 0 };
     132        this->do_terminate = false;
    142133        this->preemption_alarm = NULL;
    143         this->preemption = default_preemption();
    144         this->disable_preempt_count = 1;                //Start with interrupts disabled
    145134        this->pending_preemption = false;
    146135
     
    150139void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
    151140        this->cltr = cltr;
    152         this->current_coroutine = NULL;
    153         this->current_thread = NULL;
    154         (&this->terminated){};
    155         this->is_terminated = false;
    156         this->disable_preempt_count = 0;
     141        (&this->terminated){ 0 };
     142        this->do_terminate = false;
     143        this->preemption_alarm = NULL;
    157144        this->pending_preemption = false;
     145        this->kernel_thread = pthread_self();
    158146
    159147        this->runner = runner;
    160         LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);
     148        LIB_DEBUG_PRINT_SAFE("Kernel : constructing main processor context %p\n", runner);
    161149        runner{ this };
    162150}
    163151
    164 void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
    165         (&this->alarms){};
    166         (&this->alarm_lock){};
    167         this->pending_alarm = false;
    168 
    169         (&this->proc){ cltr, runner };
    170 }
    171 
    172152void ^?{}(processor * this) {
    173         if( ! this->is_terminated ) {
     153        if( ! this->do_terminate ) {
    174154                LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this);
    175                 this->is_terminated = true;
    176                 wait( &this->terminated );
     155                this->do_terminate = true;
     156                P( &this->terminated );
     157                pthread_join( this->kernel_thread, NULL );
    177158        }
    178159}
     
    180161void ?{}(cluster * this) {
    181162        ( &this->ready_queue ){};
    182         ( &this->lock ){};
     163        ( &this->ready_queue_lock ){};
     164
     165        this->preemption = default_preemption();
    183166}
    184167
    185168void ^?{}(cluster * this) {
    186        
     169
    187170}
    188171
     
    203186
    204187                thread_desc * readyThread = NULL;
    205                 for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
     188                for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
    206189                {
    207190                        readyThread = nextThread( this->cltr );
     
    209192                        if(readyThread)
    210193                        {
     194                                verify( disable_preempt_count > 0 );
     195
    211196                                runThread(this, readyThread);
     197
     198                                verify( disable_preempt_count > 0 );
    212199
    213200                                //Some actions need to be taken from the kernel
     
    225212        }
    226213
    227         signal( &this->terminated );
     214        V( &this->terminated );
     215
    228216        LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this);
    229217}
    230218
    231 // runThread runs a thread by context switching 
    232 // from the processor coroutine to the target thread 
     219// runThread runs a thread by context switching
     220// from the processor coroutine to the target thread
    233221void runThread(processor * this, thread_desc * dst) {
    234222        coroutine_desc * proc_cor = get_coroutine(this->runner);
    235223        coroutine_desc * thrd_cor = get_coroutine(dst);
    236        
     224
    237225        //Reset the terminating actions here
    238226        this->finish.action_code = No_Action;
    239227
    240228        //Update global state
    241         this->current_thread = dst;
     229        this_thread = dst;
    242230
    243231        // Context Switch to the thread
     
    246234}
    247235
    248 // Once a thread has finished running, some of 
     236// Once a thread has finished running, some of
    249237// its final actions must be executed from the kernel
    250238void finishRunning(processor * this) {
     
    256244        }
    257245        else if( this->finish.action_code == Release_Schedule ) {
    258                 unlock( this->finish.lock );           
     246                unlock( this->finish.lock );
    259247                ScheduleThread( this->finish.thrd );
    260248        }
     
    289277        processor * proc = (processor *) arg;
    290278        this_processor = proc;
     279        this_coroutine = NULL;
     280        this_thread = NULL;
     281        disable_preempt_count = 1;
    291282        // SKULLDUGGERY: We want to create a context for the processor coroutine
    292283        // which is needed for the 2-step context switch. However, there is no reason
    293         // to waste the perfectly valid stack create by pthread. 
     284        // to waste the perfectly valid stack create by pthread.
    294285        current_stack_info_t info;
    295286        machine_context_t ctx;
     
    300291
    301292        //Set global state
    302         proc->current_coroutine = &proc->runner->__cor;
    303         proc->current_thread = NULL;
     293        this_coroutine = &proc->runner->__cor;
     294        this_thread = NULL;
    304295
    305296        //We now have a proper context from which to schedule threads
    306297        LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);
    307298
    308         // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 
    309         // resume it to start it like it normally would, it will just context switch 
    310         // back to here. Instead directly call the main since we already are on the 
     299        // SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
     300        // resume it to start it like it normally would, it will just context switch
     301        // back to here. Instead directly call the main since we already are on the
    311302        // appropriate stack.
    312303        proc_cor_storage.__cor.state = Active;
     
    315306
    316307        // Main routine of the core returned, the core is now fully terminated
    317         LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 
     308        LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);
    318309
    319310        return NULL;
     
    322313void start(processor * this) {
    323314        LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);
    324        
     315
    325316        pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
    326317
    327         LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);       
     318        LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
    328319}
    329320
     
    331322// Scheduler routines
    332323void ScheduleThread( thread_desc * thrd ) {
    333         if( !thrd ) return;
     324        // if( !thrd ) return;
     325        assert( thrd );
     326        assert( thrd->cor.state != Halted );
     327
     328        verify( disable_preempt_count > 0 );
    334329
    335330        verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
    336        
    337         lock( &systemProcessor->proc.cltr->lock );
    338         append( &systemProcessor->proc.cltr->ready_queue, thrd );
    339         unlock( &systemProcessor->proc.cltr->lock );
     331
     332        lock(   &this_processor->cltr->ready_queue_lock DEBUG_CTX2 );
     333        append( &this_processor->cltr->ready_queue, thrd );
     334        unlock( &this_processor->cltr->ready_queue_lock );
     335
     336        verify( disable_preempt_count > 0 );
    340337}
    341338
    342339thread_desc * nextThread(cluster * this) {
    343         lock( &this->lock );
     340        verify( disable_preempt_count > 0 );
     341        lock( &this->ready_queue_lock DEBUG_CTX2 );
    344342        thread_desc * head = pop_head( &this->ready_queue );
    345         unlock( &this->lock );
     343        unlock( &this->ready_queue_lock );
     344        verify( disable_preempt_count > 0 );
    346345        return head;
    347346}
    348347
    349 void ScheduleInternal() {
    350         suspend();
    351 }
    352 
    353 void ScheduleInternal( spinlock * lock ) {
     348void BlockInternal() {
     349        disable_interrupts();
     350        verify( disable_preempt_count > 0 );
     351        suspend();
     352        verify( disable_preempt_count > 0 );
     353        enable_interrupts( DEBUG_CTX );
     354}
     355
     356void BlockInternal( spinlock * lock ) {
     357        disable_interrupts();
    354358        this_processor->finish.action_code = Release;
    355359        this_processor->finish.lock = lock;
    356         suspend();
    357 }
    358 
    359 void ScheduleInternal( thread_desc * thrd ) {
     360
     361        verify( disable_preempt_count > 0 );
     362        suspend();
     363        verify( disable_preempt_count > 0 );
     364
     365        enable_interrupts( DEBUG_CTX );
     366}
     367
     368void BlockInternal( thread_desc * thrd ) {
     369        disable_interrupts();
     370        assert( thrd->cor.state != Halted );
    360371        this_processor->finish.action_code = Schedule;
    361372        this_processor->finish.thrd = thrd;
    362         suspend();
    363 }
    364 
    365 void ScheduleInternal( spinlock * lock, thread_desc * thrd ) {
     373
     374        verify( disable_preempt_count > 0 );
     375        suspend();
     376        verify( disable_preempt_count > 0 );
     377
     378        enable_interrupts( DEBUG_CTX );
     379}
     380
     381void BlockInternal( spinlock * lock, thread_desc * thrd ) {
     382        disable_interrupts();
    366383        this_processor->finish.action_code = Release_Schedule;
    367384        this_processor->finish.lock = lock;
    368385        this_processor->finish.thrd = thrd;
    369         suspend();
    370 }
    371 
    372 void ScheduleInternal(spinlock ** locks, unsigned short count) {
     386
     387        verify( disable_preempt_count > 0 );
     388        suspend();
     389        verify( disable_preempt_count > 0 );
     390
     391        enable_interrupts( DEBUG_CTX );
     392}
     393
     394void BlockInternal(spinlock ** locks, unsigned short count) {
     395        disable_interrupts();
    373396        this_processor->finish.action_code = Release_Multi;
    374397        this_processor->finish.locks = locks;
    375398        this_processor->finish.lock_count = count;
    376         suspend();
    377 }
    378 
    379 void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     399
     400        verify( disable_preempt_count > 0 );
     401        suspend();
     402        verify( disable_preempt_count > 0 );
     403
     404        enable_interrupts( DEBUG_CTX );
     405}
     406
     407void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
     408        disable_interrupts();
    380409        this_processor->finish.action_code = Release_Multi_Schedule;
    381410        this_processor->finish.locks = locks;
     
    383412        this_processor->finish.thrds = thrds;
    384413        this_processor->finish.thrd_count = thrd_count;
     414
     415        verify( disable_preempt_count > 0 );
     416        suspend();
     417        verify( disable_preempt_count > 0 );
     418
     419        enable_interrupts( DEBUG_CTX );
     420}
     421
     422void LeaveThread(spinlock * lock, thread_desc * thrd) {
     423        verify( disable_preempt_count > 0 );
     424        this_processor->finish.action_code = thrd ? Release_Schedule : Release;
     425        this_processor->finish.lock = lock;
     426        this_processor->finish.thrd = thrd;
     427
    385428        suspend();
    386429}
     
    392435// Kernel boot procedures
    393436void kernel_startup(void) {
    394         LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");   
     437        LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");
    395438
    396439        // Start by initializing the main thread
    397         // SKULLDUGGERY: the mainThread steals the process main thread 
    398         // which will then be scheduled by the systemProcessor normally
    399         mainThread = (thread_desc *)&mainThread_storage;
     440        // SKULLDUGGERY: the mainThread steals the process main thread
     441        // which will then be scheduled by the mainProcessor normally
     442        mainThread = (thread_desc *)&storage_mainThread;
    400443        current_stack_info_t info;
    401444        mainThread{ &info };
     
    403446        LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");
    404447
     448        // Initialize the main cluster
     449        mainCluster = (cluster *)&storage_mainCluster;
     450        mainCluster{};
     451
     452        LIB_DEBUG_PRINT_SAFE("Kernel : main cluster ready\n");
     453
     454        // Initialize the main processor and the main processor ctx
     455        // (the coroutine that contains the processing control flow)
     456        mainProcessor = (processor *)&storage_mainProcessor;
     457        mainProcessor{ mainCluster, (processorCtx_t *)&storage_mainProcessorCtx };
     458
     459        //initialize the global state variables
     460        this_processor = mainProcessor;
     461        this_thread = mainThread;
     462        this_coroutine = &mainThread->cor;
     463
    405464        // Enable preemption
    406465        kernel_start_preemption();
    407466
    408         // Initialize the system cluster
    409         systemCluster = (cluster *)&systemCluster_storage;
    410         systemCluster{};
    411 
    412         LIB_DEBUG_PRINT_SAFE("Kernel : System cluster ready\n");
    413 
    414         // Initialize the system processor and the system processor ctx
    415         // (the coroutine that contains the processing control flow)
    416         systemProcessor = (system_proc_t *)&systemProcessor_storage;
    417         systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };
    418 
    419         // Add the main thread to the ready queue
    420         // once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
     467        // Add the main thread to the ready queue
     468        // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
    421469        ScheduleThread(mainThread);
    422470
    423         //initialize the global state variables
    424         this_processor = &systemProcessor->proc;
    425         this_processor->current_thread = mainThread;
    426         this_processor->current_coroutine = &mainThread->cor;
    427 
    428         // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
     471        // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
    429472        // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
    430         // mainThread is on the ready queue when this call is made. 
    431         resume( systemProcessor->proc.runner );
     473        // mainThread is on the ready queue when this call is made.
     474        resume( mainProcessor->runner );
    432475
    433476
     
    435478        // THE SYSTEM IS NOW COMPLETELY RUNNING
    436479        LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");
     480
     481        enable_interrupts( DEBUG_CTX );
    437482}
    438483
     
    440485        LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");
    441486
    442         // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
     487        disable_interrupts();
     488
     489        // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
    443490        // When its coroutine terminates, it return control to the mainThread
    444491        // which is currently here
    445         systemProcessor->proc.is_terminated = true;
     492        mainProcessor->do_terminate = true;
    446493        suspend();
    447494
    448495        // THE SYSTEM IS NOW COMPLETELY STOPPED
    449496
    450         // Destroy the system processor and its context in reverse order of construction
     497        // Disable preemption
     498        kernel_stop_preemption();
     499
     500        // Destroy the main processor and its context in reverse order of construction
    451501        // These were manually constructed so we need manually destroy them
    452         ^(systemProcessor->proc.runner){};
    453         ^(systemProcessor){};
     502        ^(mainProcessor->runner){};
     503        ^(mainProcessor){};
    454504
    455505        // Final step, destroy the main thread since it is no longer needed
     
    457507        ^(mainThread){};
    458508
    459         LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");   
     509        LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");
    460510}
    461511
     
    467517        // abort cannot be recursively entered by the same or different processors because all signal handlers return when
    468518        // the globalAbort flag is true.
    469         lock( &kernel_abort_lock );
     519        lock( &kernel_abort_lock DEBUG_CTX2 );
    470520
    471521        // first task to abort ?
     
    473523                kernel_abort_called = true;
    474524                unlock( &kernel_abort_lock );
    475         } 
     525        }
    476526        else {
    477527                unlock( &kernel_abort_lock );
    478                
     528
    479529                sigset_t mask;
    480530                sigemptyset( &mask );
     
    482532                sigaddset( &mask, SIGUSR1 );                    // block SIGUSR1 signals
    483533                sigsuspend( &mask );                            // block the processor to prevent further damage during abort
    484                 _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it             
    485         }
    486 
    487         return this_thread();
     534                _exit( EXIT_FAILURE );                          // if processor unblocks before it is killed, terminate it
     535        }
     536
     537        return this_thread;
    488538}
    489539
     
    494544        __lib_debug_write( STDERR_FILENO, abort_text, len );
    495545
    496         if ( thrd != this_coroutine() ) {
    497                 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine()->name, this_coroutine() );
     546        if ( thrd != this_coroutine ) {
     547                len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
    498548                __lib_debug_write( STDERR_FILENO, abort_text, len );
    499         } 
     549        }
    500550        else {
    501551                __lib_debug_write( STDERR_FILENO, ".\n", 2 );
     
    505555extern "C" {
    506556        void __lib_debug_acquire() {
    507                 lock(&kernel_debug_lock);
     557                lock( &kernel_debug_lock DEBUG_CTX2 );
    508558        }
    509559
    510560        void __lib_debug_release() {
    511                 unlock(&kernel_debug_lock);
     561                unlock( &kernel_debug_lock );
    512562        }
    513563}
     
    525575}
    526576
    527 bool try_lock( spinlock * this ) {
     577bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) {
    528578        return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
    529579}
    530580
    531 void lock( spinlock * this ) {
     581void lock( spinlock * this DEBUG_CTX_PARAM2 ) {
    532582        for ( unsigned int i = 1;; i += 1 ) {
    533                 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
    534         }
    535 }
     583                if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
     584        }
     585        LIB_DEBUG_DO(
     586                this->prev_name = caller;
     587                this->prev_thrd = this_thread;
     588        )
     589}
     590
     591void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) {
     592        for ( unsigned int i = 1;; i += 1 ) {
     593                if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; }
     594                yield();
     595        }
     596        LIB_DEBUG_DO(
     597                this->prev_name = caller;
     598                this->prev_thrd = this_thread;
     599        )
     600}
     601
    536602
    537603void unlock( spinlock * this ) {
     
    539605}
    540606
    541 void ?{}( signal_once * this ) {
    542         this->cond = false;
    543 }
    544 void ^?{}( signal_once * this ) {
    545 
    546 }
    547 
    548 void wait( signal_once * this ) {
    549         lock( &this->lock );
    550         if( !this->cond ) {
    551                 append( &this->blocked, this_thread() );
    552                 ScheduleInternal( &this->lock );
    553                 lock( &this->lock );
    554         }
     607void  ?{}( semaphore * this, int count = 1 ) {
     608        (&this->lock){};
     609        this->count = count;
     610        (&this->waiting){};
     611}
     612void ^?{}(semaphore * this) {}
     613
     614void P(semaphore * this) {
     615        lock( &this->lock DEBUG_CTX2 );
     616        this->count -= 1;
     617        if ( this->count < 0 ) {
     618                // queue current task
     619                append( &this->waiting, (thread_desc *)this_thread );
     620
     621                // atomically release spin lock and block
     622                BlockInternal( &this->lock );
     623        }
     624        else {
     625            unlock( &this->lock );
     626        }
     627}
     628
     629void V(semaphore * this) {
     630        thread_desc * thrd = NULL;
     631        lock( &this->lock DEBUG_CTX2 );
     632        this->count += 1;
     633        if ( this->count <= 0 ) {
     634                // remove task at head of waiting list
     635                thrd = pop_head( &this->waiting );
     636        }
     637
    555638        unlock( &this->lock );
    556 }
    557 
    558 void signal( signal_once * this ) {
    559         lock( &this->lock );
    560         {
    561                 this->cond = true;
    562 
    563                 thread_desc * it;
    564                 while( it = pop_head( &this->blocked) ) {
    565                         ScheduleThread( it );
    566                 }
    567         }
    568         unlock( &this->lock );
     639
     640        // make new owner
     641        WakeThread( thrd );
    569642}
    570643
     
    590663                }
    591664                head->next = NULL;
    592         }       
     665        }
    593666        return head;
    594667}
     
    609682                this->top = top->next;
    610683                top->next = NULL;
    611         }       
     684        }
    612685        return top;
    613686}
  • src/libcfa/concurrency/kernel_private.h

    rfea3faa rb826e6b  
    1818#define KERNEL_PRIVATE_H
    1919
     20#include "libhdr.h"
     21
    2022#include "kernel"
    2123#include "thread"
     
    2325#include "alarm.h"
    2426
    25 #include "libhdr.h"
    2627
    2728//-----------------------------------------------------------------------------
    2829// Scheduler
     30
     31extern "C" {
     32        void disable_interrupts();
     33        void enable_interrupts_noPoll();
     34        void enable_interrupts( DEBUG_CTX_PARAM );
     35}
     36
    2937void ScheduleThread( thread_desc * );
     38static inline void WakeThread( thread_desc * thrd ) {
     39        if( !thrd ) return;
     40
     41        disable_interrupts();
     42        ScheduleThread( thrd );
     43        enable_interrupts( DEBUG_CTX );
     44}
    3045thread_desc * nextThread(cluster * this);
    3146
    32 void ScheduleInternal(void);
    33 void ScheduleInternal(spinlock * lock);
    34 void ScheduleInternal(thread_desc * thrd);
    35 void ScheduleInternal(spinlock * lock, thread_desc * thrd);
    36 void ScheduleInternal(spinlock ** locks, unsigned short count);
    37 void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
     47//Block current thread and release/wake-up the following resources
     48void BlockInternal(void);
     49void BlockInternal(spinlock * lock);
     50void BlockInternal(thread_desc * thrd);
     51void BlockInternal(spinlock * lock, thread_desc * thrd);
     52void BlockInternal(spinlock ** locks, unsigned short count);
     53void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
     54void LeaveThread(spinlock * lock, thread_desc * thrd);
    3855
    3956//-----------------------------------------------------------------------------
     
    4966void spin(processor * this, unsigned int * spin_count);
    5067
    51 struct system_proc_t {
    52         processor proc;
    53 
     68struct event_kernel_t {
    5469        alarm_list_t alarms;
    55         spinlock alarm_lock;
    56 
    57         bool pending_alarm;
     70        spinlock lock;
    5871};
    5972
    60 extern cluster * systemCluster;
    61 extern system_proc_t * systemProcessor;
    62 extern thread_local processor * this_processor;
     73extern event_kernel_t * event_kernel;
    6374
    64 static inline void disable_interrupts() {
    65         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST );
    66         assert( prev != (unsigned short) -1 );
    67 }
    68 
    69 static inline void enable_interrupts_noRF() {
    70         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    71         verify( prev != (unsigned short) 0 );
    72 }
    73 
    74 static inline void enable_interrupts() {
    75         __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
    76         verify( prev != (unsigned short) 0 );
    77         if( prev == 1 && this_processor->pending_preemption ) {
    78                 ScheduleInternal( this_processor->current_thread );
    79                 this_processor->pending_preemption = false;
    80         }
    81 }
     75extern volatile thread_local processor * this_processor;
     76extern volatile thread_local coroutine_desc * this_coroutine;
     77extern volatile thread_local thread_desc * this_thread;
     78extern volatile thread_local bool preemption_in_progress;
     79extern volatile thread_local unsigned short disable_preempt_count;
    8280
    8381//-----------------------------------------------------------------------------
     
    9088extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);
    9189
     90//-----------------------------------------------------------------------------
     91// Utils
     92#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
     93
    9294#endif //KERNEL_PRIVATE_H
    9395
  • src/libcfa/concurrency/monitor

    rfea3faa rb826e6b  
    2626static inline void ?{}(monitor_desc * this) {
    2727        this->owner = NULL;
    28         this->stack_owner = NULL;
    2928        this->recursion = 0;
    3029}
  • src/libcfa/concurrency/monitor.c

    rfea3faa rb826e6b  
    1919#include <stdlib>
    2020
     21#include "libhdr.h"
    2122#include "kernel_private.h"
    22 #include "libhdr.h"
    2323
    2424//-----------------------------------------------------------------------------
     
    4444
    4545extern "C" {
    46         void __enter_monitor_desc(monitor_desc * this) {
    47                 lock( &this->lock );
    48                 thread_desc * thrd = this_thread();
    49 
    50                 LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
     46        void __enter_monitor_desc( monitor_desc * this ) {
     47                lock_yield( &this->lock DEBUG_CTX2 );
     48                thread_desc * thrd = this_thread;
     49
     50                // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    5151
    5252                if( !this->owner ) {
     
    6262                        //Some one else has the monitor, wait in line for it
    6363                        append( &this->entry_queue, thrd );
    64                         LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
    65                         ScheduleInternal( &this->lock );
    66 
    67                         //ScheduleInternal will unlock spinlock, no need to unlock ourselves
    68                         return; 
     64                        // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
     65                        BlockInternal( &this->lock );
     66
     67                        //BlockInternal will unlock spinlock, no need to unlock ourselves
     68                        return;
    6969                }
    7070
     
    7575        // leave pseudo code :
    7676        //      TODO
    77         void __leave_monitor_desc(monitor_desc * this) {
    78                 lock( &this->lock );
    79 
    80                 LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
    81                 verifyf( this_thread() == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );
     77        void __leave_monitor_desc( monitor_desc * this ) {
     78                lock_yield( &this->lock DEBUG_CTX2 );
     79
     80                // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
     81                verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
    8282
    8383                //Leaving a recursion level, decrement the counter
     
    9696                unlock( &this->lock );
    9797
    98                 LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
     98                // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
    9999
    100100                //We need to wake-up the thread
    101                 ScheduleThread( new_owner );
     101                WakeThread( new_owner );
     102        }
     103
     104        void __leave_thread_monitor( thread_desc * thrd ) {
     105                monitor_desc * this = &thrd->mon;
     106                lock_yield( &this->lock DEBUG_CTX2 );
     107
     108                disable_interrupts();
     109
     110                thrd->cor.state = Halted;
     111
     112                verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
     113
     114                //Leaving a recursion level, decrement the counter
     115                this->recursion -= 1;
     116
     117                //If we haven't left the last level of recursion
     118                //it means we don't need to do anything
     119                if( this->recursion != 0) {
     120                        unlock( &this->lock );
     121                        return;
     122                }
     123
     124                thread_desc * new_owner = next_thread( this );
     125
     126                LeaveThread( &this->lock, new_owner );
    102127        }
    103128}
     
    121146        enter( this->m, this->count );
    122147
    123         this->prev_mntrs = this_thread()->current_monitors;
    124         this->prev_count = this_thread()->current_monitor_count;
    125 
    126         this_thread()->current_monitors      = m;
    127         this_thread()->current_monitor_count = count;
     148        this->prev_mntrs = this_thread->current_monitors;
     149        this->prev_count = this_thread->current_monitor_count;
     150
     151        this_thread->current_monitors      = m;
     152        this_thread->current_monitor_count = count;
    128153}
    129154
     
    131156        leave( this->m, this->count );
    132157
    133         this_thread()->current_monitors      = this->prev_mntrs;
    134         this_thread()->current_monitor_count = this->prev_count;
     158        this_thread->current_monitors      = this->prev_mntrs;
     159        this_thread->current_monitor_count = this->prev_count;
    135160}
    136161
     
    159184// Internal scheduling
    160185void wait( condition * this, uintptr_t user_info = 0 ) {
    161         LIB_DEBUG_PRINT_SAFE("Waiting\n");
     186        // LIB_DEBUG_PRINT_SAFE("Waiting\n");
    162187
    163188        brand_condition( this );
     
    170195        unsigned short count = this->monitor_count;
    171196        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    172         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
    173 
    174         LIB_DEBUG_PRINT_SAFE("count %i\n", count);
    175 
    176         __condition_node_t waiter = { this_thread(), count, user_info };
     197        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
     198
     199        // LIB_DEBUG_PRINT_SAFE("count %i\n", count);
     200
     201        __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
    177202
    178203        __condition_criterion_t criteria[count];
    179204        for(int i = 0; i < count; i++) {
    180205                (&criteria[i]){ this->monitors[i], &waiter };
    181                 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
     206                // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    182207        }
    183208
     
    201226        }
    202227
    203         LIB_DEBUG_PRINT_SAFE("Will unblock: ");
     228        // LIB_DEBUG_PRINT_SAFE("Will unblock: ");
    204229        for(int i = 0; i < thread_count; i++) {
    205                 LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
    206         }
    207         LIB_DEBUG_PRINT_SAFE("\n");
     230                // LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);
     231        }
     232        // LIB_DEBUG_PRINT_SAFE("\n");
    208233
    209234        // Everything is ready to go to sleep
    210         ScheduleInternal( locks, count, threads, thread_count );
     235        BlockInternal( locks, count, threads, thread_count );
    211236
    212237
     
    222247bool signal( condition * this ) {
    223248        if( is_empty( this ) ) {
    224                 LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
     249                // LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");
    225250                return false;
    226251        }
     
    231256
    232257        unsigned short count = this->monitor_count;
    233        
     258
    234259        //Some more checking in debug
    235260        LIB_DEBUG_DO(
    236                 thread_desc * this_thrd = this_thread();
     261                thread_desc * this_thrd = this_thread;
    237262                if ( this->monitor_count != this_thrd->current_monitor_count ) {
    238263                        abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
     
    248273        //Lock all the monitors
    249274        lock_all( this->monitors, NULL, count );
    250         LIB_DEBUG_PRINT_SAFE("Signalling");
     275        // LIB_DEBUG_PRINT_SAFE("Signalling");
    251276
    252277        //Pop the head of the waiting queue
     
    256281        for(int i = 0; i < count; i++) {
    257282                __condition_criterion_t * crit = &node->criteria[i];
    258                 LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
     283                // LIB_DEBUG_PRINT_SAFE(" %p", crit->target);
    259284                assert( !crit->ready );
    260285                push( &crit->target->signal_stack, crit );
    261286        }
    262287
    263         LIB_DEBUG_PRINT_SAFE("\n");
     288        // LIB_DEBUG_PRINT_SAFE("\n");
    264289
    265290        //Release
     
    281306        unsigned short count = this->monitor_count;
    282307        unsigned int recursions[ count ];               //Save the current recursion levels to restore them later
    283         spinlock *   locks     [ count ];               //We need to pass-in an array of locks to ScheduleInternal
     308        spinlock *   locks     [ count ];               //We need to pass-in an array of locks to BlockInternal
    284309
    285310        lock_all( this->monitors, locks, count );
    286311
    287312        //create creteria
    288         __condition_node_t waiter = { this_thread(), count, 0 };
     313        __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
    289314
    290315        __condition_criterion_t criteria[count];
    291316        for(int i = 0; i < count; i++) {
    292317                (&criteria[i]){ this->monitors[i], &waiter };
    293                 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
     318                // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );
    294319                push( &criteria[i].target->signal_stack, &criteria[i] );
    295320        }
     
    309334
    310335        //Everything is ready to go to sleep
    311         ScheduleInternal( locks, count, &signallee, 1 );
     336        BlockInternal( locks, count, &signallee, 1 );
    312337
    313338
     
    325350
    326351uintptr_t front( condition * this ) {
    327         verifyf( !is_empty(this), 
     352        verifyf( !is_empty(this),
    328353                "Attempt to access user data on an empty condition.\n"
    329354                "Possible cause is not checking if the condition is empty before reading stored data."
     
    335360// Internal scheduling
    336361void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
    337         // thread_desc * this = this_thread();
     362        // thread_desc * this = this_thread;
    338363
    339364        // unsigned short count = this->current_monitor_count;
    340365        // unsigned int recursions[ count ];            //Save the current recursion levels to restore them later
    341         // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to ScheduleInternal
     366        // spinlock *   locks     [ count ];            //We need to pass-in an array of locks to BlockInternal
    342367
    343368        // lock_all( this->current_monitors, locks, count );
     
    348373
    349374        // // // Everything is ready to go to sleep
    350         // // ScheduleInternal( locks, count, threads, thread_count );
     375        // // BlockInternal( locks, count, threads, thread_count );
    351376
    352377
     
    393418static inline void lock_all( spinlock ** locks, unsigned short count ) {
    394419        for( int i = 0; i < count; i++ ) {
    395                 lock( locks[i] );
     420                lock_yield( locks[i] DEBUG_CTX2 );
    396421        }
    397422}
     
    400425        for( int i = 0; i < count; i++ ) {
    401426                spinlock * l = &source[i]->lock;
    402                 lock( l );
     427                lock_yield( l DEBUG_CTX2 );
    403428                if(locks) locks[i] = l;
    404429        }
     
    443468        for(    int i = 0; i < count; i++ ) {
    444469
    445                 LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
     470                // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );
    446471                if( &criteria[i] == target ) {
    447472                        criteria[i].ready = true;
    448                         LIB_DEBUG_PRINT_SAFE( "True\n" );
     473                        // LIB_DEBUG_PRINT_SAFE( "True\n" );
    449474                }
    450475
     
    452477        }
    453478
    454         LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );
     479        // LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );
    455480        return ready2run ? node->waiting_thread : NULL;
    456481}
    457482
    458483static inline void brand_condition( condition * this ) {
    459         thread_desc * thrd = this_thread();
     484        thread_desc * thrd = this_thread;
    460485        if( !this->monitors ) {
    461                 LIB_DEBUG_PRINT_SAFE("Branding\n");
     486                // LIB_DEBUG_PRINT_SAFE("Branding\n");
    462487                assertf( thrd->current_monitors != NULL, "No current monitor to brand condition", thrd->current_monitors );
    463488                this->monitor_count = thrd->current_monitor_count;
  • src/libcfa/concurrency/preemption.c

    rfea3faa rb826e6b  
    1515//
    1616
     17#include "libhdr.h"
    1718#include "preemption.h"
    1819
    1920extern "C" {
     21#include <errno.h>
     22#include <execinfo.h>
     23#define __USE_GNU
    2024#include <signal.h>
    21 }
    22 
    23 #define __CFA_DEFAULT_PREEMPTION__ 10
    24 
     25#undef __USE_GNU
     26#include <stdio.h>
     27#include <string.h>
     28#include <unistd.h>
     29}
     30
     31
     32#ifdef __USE_STREAM__
     33#include "fstream"
     34#endif
     35
     36//TODO move to defaults
     37#define __CFA_DEFAULT_PREEMPTION__ 10000
     38
     39//TODO move to defaults
    2540__attribute__((weak)) unsigned int default_preemption() {
    2641        return __CFA_DEFAULT_PREEMPTION__;
    2742}
    2843
     44// Short hands for signal context information
     45#define __CFA_SIGCXT__ ucontext_t *
     46#define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt
     47
     48// FwdDeclarations : timeout handlers
    2949static void preempt( processor   * this );
    3050static void timeout( thread_desc * this );
    3151
     52// FwdDeclarations : Signal handlers
     53void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
     54void sigHandler_segv     ( __CFA_SIGPARMS__ );
     55void sigHandler_abort    ( __CFA_SIGPARMS__ );
     56
     57// FwdDeclarations : sigaction wrapper
     58static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
     59
     60// FwdDeclarations : alarm thread main
     61void * alarm_loop( __attribute__((unused)) void * args );
     62
     63// Machine specific register name
     64#ifdef __x86_64__
     65#define CFA_REG_IP REG_RIP
     66#else
     67#define CFA_REG_IP REG_EIP
     68#endif
     69
     70KERNEL_STORAGE(event_kernel_t, event_kernel);         // private storage for event kernel
     71event_kernel_t * event_kernel;                        // kernel public handle to even kernel
     72static pthread_t alarm_thread;                        // pthread handle to alarm thread
     73
     74void ?{}(event_kernel_t * this) {
     75        (&this->alarms){};
     76        (&this->lock){};
     77}
     78
    3279//=============================================================================================
    3380// Kernel Preemption logic
    3481//=============================================================================================
    3582
    36 void kernel_start_preemption() {
    37 
    38 }
    39 
     83// Get next expired node
     84static inline alarm_node_t * get_expired( alarm_list_t * alarms, __cfa_time_t currtime ) {
     85        if( !alarms->head ) return NULL;                          // If no alarms return null
     86        if( alarms->head->alarm >= currtime ) return NULL;        // If alarms head not expired return null
     87        return pop(alarms);                                       // Otherwise just pop head
     88}
     89
     90// Tick one frame of the Discrete Event Simulation for alarms
    4091void tick_preemption() {
    41         alarm_list_t * alarms = &systemProcessor->alarms;
    42         __cfa_time_t currtime = __kernel_get_time();
    43         while( alarms->head && alarms->head->alarm < currtime ) {
    44                 alarm_node_t * node = pop(alarms);
     92        alarm_node_t * node = NULL;                     // Used in the while loop but cannot be declared in the while condition
     93        alarm_list_t * alarms = &event_kernel->alarms;  // Local copy for ease of reading
     94        __cfa_time_t currtime = __kernel_get_time();    // Check current time once so we everything "happens at once"
     95
     96        //Loop throught every thing expired
     97        while( node = get_expired( alarms, currtime ) ) {
     98
     99                // Check if this is a kernel
    45100                if( node->kernel_alarm ) {
    46101                        preempt( node->proc );
     
    50105                }
    51106
    52                 if( node->period > 0 ) {
    53                         node->alarm += node->period;
    54                         insert( alarms, node );
     107                // Check if this is a periodic alarm
     108                __cfa_time_t period = node->period;
     109                if( period > 0 ) {
     110                        node->alarm = currtime + period;    // Alarm is periodic, add currtime to it (used cached current time)
     111                        insert( alarms, node );             // Reinsert the node for the next time it triggers
    55112                }
    56113                else {
    57                         node->set = false;
    58                 }
    59         }
    60 
    61         if( alarms->head ) {
    62                 __kernel_set_timer( alarms->head->alarm - currtime );
    63         }
    64 }
    65 
     114                        node->set = false;                  // Node is one-shot, just mark it as not pending
     115                }
     116        }
     117
     118        // If there are still alarms pending, reset the timer
     119        if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); }
     120}
     121
     122// Update the preemption of a processor and notify interested parties
    66123void update_preemption( processor * this, __cfa_time_t duration ) {
    67         //     assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 );
    68124        alarm_node_t * alarm = this->preemption_alarm;
    69125
     
    89145}
    90146
     147//=============================================================================================
     148// Kernel Signal Tools
     149//=============================================================================================
     150
     151LIB_DEBUG_DO( static thread_local void * last_interrupt = 0; )
     152
     153extern "C" {
     154        // Disable interrupts by incrementing the counter
     155        void disable_interrupts() {
     156                __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
     157                verify( new_val < 65_000u );              // If this triggers someone is disabling interrupts without enabling them
     158        }
     159
     160        // Enable interrupts by decrementing the counter
     161        // If counter reaches 0, execute any pending CtxSwitch
     162        void enable_interrupts( DEBUG_CTX_PARAM ) {
     163                processor * proc   = this_processor;      // Cache the processor now since interrupts can start happening after the atomic add
     164                thread_desc * thrd = this_thread;         // Cache the thread now since interrupts can start happening after the atomic add
     165
     166                unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
     167                verify( prev != 0u );                     // If this triggers someone is enabled already enabled interruptsverify( prev != 0u );
     168
     169                // Check if we need to prempt the thread because an interrupt was missed
     170                if( prev == 1 && proc->pending_preemption ) {
     171                        proc->pending_preemption = false;
     172                        BlockInternal( thrd );
     173                }
     174
     175                // For debugging purposes : keep track of the last person to enable the interrupts
     176                LIB_DEBUG_DO( proc->last_enable = caller; )
     177        }
     178
     179        // Disable interrupts by incrementint the counter
     180        // Don't execute any pending CtxSwitch even if counter reaches 0
     181        void enable_interrupts_noPoll() {
     182                __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
     183                verify( prev != 0u );                     // If this triggers someone is enabled already enabled interrupts
     184        }
     185}
     186
     187// sigprocmask wrapper : unblock a single signal
     188static inline void signal_unblock( int sig ) {
     189        sigset_t mask;
     190        sigemptyset( &mask );
     191        sigaddset( &mask, sig );
     192
     193        if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
     194            abortf( "internal error, pthread_sigmask" );
     195        }
     196}
     197
     198// sigprocmask wrapper : block a single signal
     199static inline void signal_block( int sig ) {
     200        sigset_t mask;
     201        sigemptyset( &mask );
     202        sigaddset( &mask, sig );
     203
     204        if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     205            abortf( "internal error, pthread_sigmask" );
     206        }
     207}
     208
     209// kill wrapper : signal a processor
     210static void preempt( processor * this ) {
     211        pthread_kill( this->kernel_thread, SIGUSR1 );
     212}
     213
     214// reserved for future use
     215static void timeout( thread_desc * this ) {
     216        //TODO : implement waking threads
     217}
     218
     219
     220// Check if a CtxSwitch signal handler shoud defer
     221// If true  : preemption is safe
     222// If false : preemption is unsafe and marked as pending
     223static inline bool preemption_ready() {
     224        bool ready = disable_preempt_count == 0 && !preemption_in_progress; // Check if preemption is safe
     225        this_processor->pending_preemption = !ready;                        // Adjust the pending flag accordingly
     226        return ready;
     227}
     228
     229//=============================================================================================
     230// Kernel Signal Startup/Shutdown logic
     231//=============================================================================================
     232
     233// Startup routine to activate preemption
     234// Called from kernel_startup
     235void kernel_start_preemption() {
     236        LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
     237
     238        // Start with preemption disabled until ready
     239        disable_preempt_count = 1;
     240
     241        // Initialize the event kernel
     242        event_kernel = (event_kernel_t *)&storage_event_kernel;
     243        event_kernel{};
     244
     245        // Setup proper signal handlers
     246        __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );         // CtxSwitch handler
     247        // __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );      // Failure handler
     248        // __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );      // Failure handler
     249
     250        signal_block( SIGALRM );
     251
     252        pthread_create( &alarm_thread, NULL, alarm_loop, NULL );
     253}
     254
     255// Shutdown routine to deactivate preemption
     256// Called from kernel_shutdown
     257void kernel_stop_preemption() {
     258        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n");
     259
     260        // Block all signals since we are already shutting down
     261        sigset_t mask;
     262        sigfillset( &mask );
     263        sigprocmask( SIG_BLOCK, &mask, NULL );
     264
     265        // Notify the alarm thread of the shutdown
     266        sigval val = { 1 };
     267        pthread_sigqueue( alarm_thread, SIGALRM, val );
     268
     269        // Wait for the preemption thread to finish
     270        pthread_join( alarm_thread, NULL );
     271
     272        // Preemption is now fully stopped
     273
     274        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
     275}
     276
     277// Raii ctor/dtor for the preemption_scope
     278// Used by thread to control when they want to receive preemption signals
    91279void ?{}( preemption_scope * this, processor * proc ) {
    92         (&this->alarm){ proc };
     280        (&this->alarm){ proc, zero_time, zero_time };
    93281        this->proc = proc;
    94282        this->proc->preemption_alarm = &this->alarm;
    95         update_preemption( this->proc, this->proc->preemption );
     283
     284        update_preemption( this->proc, from_us(this->proc->cltr->preemption) );
    96285}
    97286
    98287void ^?{}( preemption_scope * this ) {
    99         update_preemption( this->proc, 0 );
    100 }
    101 
    102 //=============================================================================================
    103 // Kernel Signal logic
    104 //=============================================================================================
    105 
    106 static inline bool preemption_ready() {
    107         return this_processor->disable_preempt_count == 0;
    108 }
    109 
    110 static inline void defer_ctxSwitch() {
    111         this_processor->pending_preemption = true;
    112 }
    113 
    114 static inline void defer_alarm() {
    115         systemProcessor->pending_alarm = true;
    116 }
    117 
    118 void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) {
    119         if( preemption_ready() ) {
    120                 ScheduleInternal( this_processor->current_thread );
    121         }
    122         else {
    123                 defer_ctxSwitch();
    124         }
    125 }
    126 
    127 void sigHandler_alarm( __attribute__((unused)) int sig ) {
    128         if( try_lock( &systemProcessor->alarm_lock ) ) {
    129                 tick_preemption();
    130                 unlock( &systemProcessor->alarm_lock );
    131         }
    132         else {
    133                 defer_alarm();
    134         }
    135 }
    136 
    137 static void preempt( processor * this ) {
    138         pthread_kill( this->kernel_thread, SIGUSR1 );
    139 }
    140 
    141 static void timeout( thread_desc * this ) {
    142         //TODO : implement waking threads
    143 }
     288        disable_interrupts();
     289
     290        update_preemption( this->proc, zero_time );
     291}
     292
     293//=============================================================================================
     294// Kernel Signal Handlers
     295//=============================================================================================
     296
     297// Context switch signal handler
     298// Receives SIGUSR1 signal and causes the current thread to yield
     299void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
     300        LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); )
     301
     302        // Check if it is safe to preempt here
     303        if( !preemption_ready() ) { return; }
     304
     305        preemption_in_progress = true;                      // Sync flag : prevent recursive calls to the signal handler
     306        signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
     307        preemption_in_progress = false;                     // Clear the in progress flag
     308
     309        // Preemption can occur here
     310
     311        BlockInternal( (thread_desc*)this_thread );         // Do the actual CtxSwitch
     312}
     313
     314// Main of the alarm thread
     315// Waits on SIGALRM and send SIGUSR1 to whom ever needs it
     316void * alarm_loop( __attribute__((unused)) void * args ) {
     317        // Block sigalrms to control when they arrive
     318        sigset_t mask;
     319        sigemptyset( &mask );
     320        sigaddset( &mask, SIGALRM );
     321
     322        if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     323            abortf( "internal error, pthread_sigmask" );
     324        }
     325
     326        // Main loop
     327        while( true ) {
     328                // Wait for a sigalrm
     329                siginfo_t info;
     330                int sig = sigwaitinfo( &mask, &info );
     331
     332                // If another signal arrived something went wrong
     333                assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int);
     334
     335                LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int );
     336                // Switch on the code (a.k.a. the sender) to
     337                switch( info.si_code )
     338                {
     339                // Timers can apparently be marked as sent for the kernel
     340                // In either case, tick preemption
     341                case SI_TIMER:
     342                case SI_KERNEL:
     343                        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n");
     344                        lock( &event_kernel->lock DEBUG_CTX2 );
     345                        tick_preemption();
     346                        unlock( &event_kernel->lock );
     347                        break;
     348                // Signal was not sent by the kernel but by an other thread
     349                case SI_QUEUE:
     350                        // For now, other thread only signal the alarm thread to shut it down
     351                        // If this needs to change use info.si_value and handle the case here
     352                        goto EXIT;
     353                }
     354        }
     355
     356EXIT:
     357        LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread stopping\n");
     358        return NULL;
     359}
     360
     361// Sigaction wrapper : register an signal handler
     362static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
     363        struct sigaction act;
     364
     365        act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler;
     366        act.sa_flags = flags;
     367
     368        if ( sigaction( sig, &act, NULL ) == -1 ) {
     369                LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     370                        " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
     371                        sig, handler, flags, errno, strerror( errno )
     372                );
     373                _exit( EXIT_FAILURE );
     374        }
     375}
     376
     377// Sigaction wrapper : restore default handler
     378static void __kernel_sigdefault( int sig ) {
     379        struct sigaction act;
     380
     381        act.sa_handler = SIG_DFL;
     382        act.sa_flags = 0;
     383        sigemptyset( &act.sa_mask );
     384
     385        if ( sigaction( sig, &act, NULL ) == -1 ) {
     386                LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
     387                        " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",
     388                        sig, errno, strerror( errno )
     389                );
     390                _exit( EXIT_FAILURE );
     391        }
     392}
     393
     394//=============================================================================================
     395// Terminating Signals logic
     396//=============================================================================================
     397
     398LIB_DEBUG_DO(
     399        static void __kernel_backtrace( int start ) {
     400                // skip first N stack frames
     401
     402                enum { Frames = 50 };
     403                void * array[Frames];
     404                int size = backtrace( array, Frames );
     405                char ** messages = backtrace_symbols( array, size );
     406
     407                // find executable name
     408                *index( messages[0], '(' ) = '\0';
     409                #ifdef __USE_STREAM__
     410                serr | "Stack back trace for:" | messages[0] | endl;
     411                #else
     412                fprintf( stderr, "Stack back trace for: %s\n", messages[0]);
     413                #endif
     414
     415                // skip last 2 stack frames after main
     416                for ( int i = start; i < size && messages != NULL; i += 1 ) {
     417                        char * name = NULL;
     418                        char * offset_begin = NULL;
     419                        char * offset_end = NULL;
     420
     421                        for ( char *p = messages[i]; *p; ++p ) {
     422                                // find parantheses and +offset
     423                                if ( *p == '(' ) {
     424                                        name = p;
     425                                }
     426                                else if ( *p == '+' ) {
     427                                        offset_begin = p;
     428                                }
     429                                else if ( *p == ')' ) {
     430                                        offset_end = p;
     431                                        break;
     432                                }
     433                        }
     434
     435                        // if line contains symbol print it
     436                        int frameNo = i - start;
     437                        if ( name && offset_begin && offset_end && name < offset_begin ) {
     438                                // delimit strings
     439                                *name++ = '\0';
     440                                *offset_begin++ = '\0';
     441                                *offset_end++ = '\0';
     442
     443                                #ifdef __USE_STREAM__
     444                                serr    | "("  | frameNo | ")" | messages[i] | ":"
     445                                        | name | "+" | offset_begin | offset_end | endl;
     446                                #else
     447                                fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);
     448                                #endif
     449                        }
     450                        // otherwise, print the whole line
     451                        else {
     452                                #ifdef __USE_STREAM__
     453                                serr | "(" | frameNo | ")" | messages[i] | endl;
     454                                #else
     455                                fprintf( stderr, "(%i) %s\n", frameNo, messages[i] );
     456                                #endif
     457                        }
     458                }
     459
     460                free( messages );
     461        }
     462)
     463
     464// void sigHandler_segv( __CFA_SIGPARMS__ ) {
     465//      LIB_DEBUG_DO(
     466//              #ifdef __USE_STREAM__
     467//              serr    | "*CFA runtime error* program cfa-cpp terminated with"
     468//                      | (sig == SIGSEGV ? "segment fault." : "bus error.")
     469//                      | endl;
     470//              #else
     471//              fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." );
     472//              #endif
     473
     474//              // skip first 2 stack frames
     475//              __kernel_backtrace( 1 );
     476//      )
     477//      exit( EXIT_FAILURE );
     478// }
     479
     480// void sigHandler_abort( __CFA_SIGPARMS__ ) {
     481//      // skip first 6 stack frames
     482//      LIB_DEBUG_DO( __kernel_backtrace( 6 ); )
     483
     484//      // reset default signal handler
     485//      __kernel_sigdefault( SIGABRT );
     486
     487//      raise( SIGABRT );
     488// }
  • src/libcfa/concurrency/thread

    rfea3faa rb826e6b  
    5454}
    5555
    56 thread_desc * this_thread(void);
     56extern volatile thread_local thread_desc * this_thread;
    5757
    5858forall( dtype T | is_thread(T) )
  • src/libcfa/concurrency/thread.c

    rfea3faa rb826e6b  
    2828}
    2929
    30 extern thread_local processor * this_processor;
     30extern volatile thread_local processor * this_processor;
    3131
    3232//-----------------------------------------------------------------------------
     
    7171        coroutine_desc* thrd_c = get_coroutine(this);
    7272        thread_desc*  thrd_h = get_thread   (this);
    73         thrd_c->last = this_coroutine();
    74         this_processor->current_coroutine = thrd_c;
     73        thrd_c->last = this_coroutine;
    7574
    76         LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
     75        // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
    7776
     77        disable_interrupts();
    7878        create_stack(&thrd_c->stack, thrd_c->stack.size);
     79        this_coroutine = thrd_c;
    7980        CtxStart(this, CtxInvokeThread);
     81        assert( thrd_c->last->stack.context );
    8082        CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
    8183
    8284        ScheduleThread(thrd_h);
     85        enable_interrupts( DEBUG_CTX );
    8386}
    8487
    8588void yield( void ) {
    86         ScheduleInternal( this_processor->current_thread );
     89        BlockInternal( (thread_desc *)this_thread );
    8790}
    8891
     
    9598void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    9699        // set state of current coroutine to inactive
    97         src->state = Inactive;
     100        src->state = src->state == Halted ? Halted : Inactive;
    98101        dst->state = Active;
    99102
     
    103106        // set new coroutine that the processor is executing
    104107        // and context switch to it
    105         this_processor->current_coroutine = dst;
     108        this_coroutine = dst;
     109        assert( src->stack.context );
    106110        CtxSwitch( src->stack.context, dst->stack.context );
    107         this_processor->current_coroutine = src;
     111        this_coroutine = src;
    108112
    109113        // set state of new coroutine to active
    110         dst->state = Inactive;
     114        dst->state = dst->state == Halted ? Halted : Inactive;
    111115        src->state = Active;
    112116}
Note: See TracChangeset for help on using the changeset viewer.