Timestamp: May 18, 2018, 2:09:21 PM (7 years ago)
Author: Aaron Moss <a3moss@…>
Branches: new-env, with_gc
Children: 2472a19
Parents: f6f0cca3 (diff), c7d8100c (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge remote-tracking branch 'origin/master' into with_gc
Location: src/libcfa/concurrency
Files: 11 edited
  • src/libcfa/concurrency/coroutine

    (diff rf6f0cca3 → rff29f08)

    …
      //-----------------------------------------------------------------------------
      // Ctors and dtors
    - void ?{}(coStack_t & this);
    - void ?{}(coroutine_desc & this);
    - void ?{}(coroutine_desc & this, const char * name);
    - void ^?{}(coStack_t & this);
    - void ^?{}(coroutine_desc & this);
    + // void ?{}( coStack_t & this );
    + // void ^?{}( coStack_t & this );
    +
    + void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize );
    + void ^?{}( coroutine_desc & this );
    +
    + static inline void ?{}( coroutine_desc & this )                                      { this{ "Anonymous Coroutine", NULL, 0 }; }
    + static inline void ?{}( coroutine_desc & this, size_t stackSize )                    { this{ "Anonymous Coroutine", NULL, stackSize }; }
    + static inline void ?{}( coroutine_desc & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
    + static inline void ?{}( coroutine_desc & this, const char * name )                   { this{ name, NULL, 0 }; }
    + static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; }

      //-----------------------------------------------------------------------------
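The rewritten header funnels every constructor variant through the single (name, storage, storageSize) initializer, with the inline overloads merely supplying defaults. C has neither overloading nor delegating constructors, but the same funneling scheme looks roughly like this sketch (all names hypothetical):

      #include <stddef.h>

      typedef struct { const char * name; void * storage; size_t size; } coro;

      /* core initializer: every other variant forwards here */
      static void coro_init( coro * c, const char * name, void * storage, size_t size ) {
              c->name = name;  c->storage = storage;  c->size = size;
      }

      /* thin wrappers supply the defaults, like the inline ?{} overloads above */
      static void coro_init_anon ( coro * c )                 { coro_init( c, "Anonymous Coroutine", NULL, 0 ); }
      static void coro_init_named( coro * c, const char * n ) { coro_init( c, n, NULL, 0 ); }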
     
    …
      // Suspend implementation inlined for performance
      static inline void suspend() {
    -         coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    +         // optimization : read TLS once and reuse it
    +         // Safety note : this is preemption safe since, if
    +         // preemption occurs after this line, the pointer
    +         // will also migrate, which means this value will
    +         // stay in sync with the TLS
    +         coroutine_desc * src = TL_GET( this_coroutine );

              assertf( src->last != 0,
     
    …
      forall(dtype T | is_coroutine(T))
      static inline void resume(T & cor) {
    -         coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    +         // optimization : read TLS once and reuse it
    +         // Safety note : this is preemption safe since, if
    +         // preemption occurs after this line, the pointer
    +         // will also migrate, which means this value will
    +         // stay in sync with the TLS
    +         coroutine_desc * src = TL_GET( this_coroutine );
              coroutine_desc * dst = get_coroutine(cor);

    …
                      dst->last = src;
                      dst->starter = dst->starter ? dst->starter : src;
    -         } // if
    +         }

              // always done for performance testing
    …

      static inline void resume(coroutine_desc * dst) {
    -         coroutine_desc * src = TL_GET( this_coroutine );                        // optimization
    +         // optimization : read TLS once and reuse it
    +         // Safety note : this is preemption safe since, if
    +         // preemption occurs after this line, the pointer
    +         // will also migrate, which means this value will
    +         // stay in sync with the TLS
    +         coroutine_desc * src = TL_GET( this_coroutine );

              // not resuming self ?
    …
                      // set last resumer
                      dst->last = src;
    -         } // if
    +         }

              // always done for performance testing
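All three hunks above apply the same idiom: read the this_coroutine TLS slot once into a local and reuse it, with the safety argument that preemption migrates the TLS slot together with the coroutine. A minimal C sketch of the idiom (the declarations are stand-ins, not the real kernel types):

      #include <threads.h>

      struct coroutine_desc { int state; };
      static thread_local struct coroutine_desc * this_coroutine;

      void tls_once_example( void ) {
              /* read the TLS slot once; if preemption migrates this coroutine to
                 another kernel thread, the slot migrates with it, so the local
                 copy stays consistent for the rest of the call */
              struct coroutine_desc * src = this_coroutine;
              (void)src;   /* ... reuse src instead of re-reading the TLS */
      }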
  • src/libcfa/concurrency/coroutine.c

    (diff rf6f0cca3 → rff29f08)

    …
      //-----------------------------------------------------------------------------
      // Coroutine ctors and dtors
    - void ?{}(coStack_t& this) with( this ) {
    -         size    = 65000;        // size of stack
    -         storage = NULL;         // pointer to stack
    -         limit   = NULL;         // stack grows towards stack limit
    -         base    = NULL;         // base of stack
    -         context = NULL;         // address of cfa_context_t
    -         top     = NULL;         // address of top of storage
    -         userStack = false;
    - }
    -
    - void ?{}(coStack_t& this, size_t size) {
    -         this{};
    -         this.size = size;
    -
    -         create_stack(&this, this.size);
    - }
    -
    - void ?{}(coroutine_desc& this) {
    -         this{ "Anonymous Coroutine" };
    - }
    -
    - void ?{}(coroutine_desc& this, const char * name) with( this ) {
    -         this.name = name;
    -         errno_ = 0;
    -         state = Start;
    -         starter = NULL;
    -         last = NULL;
    - }
    -
    - void ?{}(coroutine_desc& this, size_t size) {
    -         this{};
    -         (this.stack){size};
    + void ?{}( coStack_t & this, void * storage, size_t storageSize ) with( this ) {
    +         size         = storageSize == 0 ? 65000 : storageSize; // size of stack
    +         this.storage = storage;                                // pointer to stack
    +         limit        = NULL;                                   // stack grows towards stack limit
    +         base         = NULL;                                   // base of stack
    +         context      = NULL;                                   // address of cfa_context_t
    +         top          = NULL;                                   // address of top of storage
    +         userStack    = storage != NULL;
      }

      void ^?{}(coStack_t & this) {
              if ( ! this.userStack && this.storage ) {
                      __cfaabi_dbg_debug_do(
                              if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) {
                                      abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
                              }
                      );
                      free( this.storage );
              }
    + }
    +
    + void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
    +         (this.stack){storage, storageSize};
    +         this.name = name;
    +         errno_ = 0;
    +         state = Start;
    +         starter = NULL;
    +         last = NULL;
      }

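The new coStack_t constructor encodes two conventions: storageSize == 0 selects the 65000-byte default, and a non-NULL storage pointer marks the stack as user-supplied, which the destructor respects by not freeing it. A hypothetical C spelling of just that logic:

      #include <stdbool.h>
      #include <stddef.h>

      struct stack { size_t size; void * storage; bool userStack; };

      void stack_init( struct stack * s, void * storage, size_t storageSize ) {
              s->size      = storageSize == 0 ? 65000 : storageSize;  /* default size */
              s->storage   = storage;
              s->userStack = storage != NULL;  /* user-owned: destructor must not free */
      }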
     
    …
      forall(dtype T | is_coroutine(T))
      void prime(T& cor) {
              coroutine_desc* this = get_coroutine(cor);
              assert(this->state == Start);

              this->state = Primed;
              resume(cor);
      }

      // Wrapper for co
      void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    -         verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
    +         // Safety note : This could cause some false positives due to preemption
    +         verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
              disable_interrupts();

              // set state of current coroutine to inactive
              src->state = src->state == Halted ? Halted : Inactive;

              // set new coroutine that task is executing
    -         TL_SET( this_coroutine, dst );
    +         kernelTLS.this_coroutine = dst;

              // context switch to specified coroutine
              assert( src->stack.context );
              CtxSwitch( src->stack.context, dst->stack.context );
              // when CtxSwitch returns we are back in the src coroutine

              // set state of new coroutine to active
              src->state = Active;

              enable_interrupts( __cfaabi_dbg_ctx );
    -         verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
    +         // Safety note : This could cause some false positives due to preemption
    +         verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate );
      } //ctxSwitchDirect

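CoroutineCtxSwitch follows a fixed protocol: mark the source inactive, publish the destination in TLS, swap machine contexts, and mark the source active once control returns. A rough C analogue using <ucontext.h>, where this_coroutine stands in for kernelTLS.this_coroutine:

      #include <ucontext.h>

      static _Thread_local void * this_coroutine;

      void ctx_switch( ucontext_t * src_ctx, ucontext_t * dst_ctx, void * dst ) {
              this_coroutine = dst;              /* publish the new coroutine */
              swapcontext( src_ctx, dst_ctx );   /* like CtxSwitch( src, dst ) */
              /* when swapcontext returns, some coroutine has resumed us */
      }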
      void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) {
              //TEMP HACK do this on proper kernel startup
              if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE );

              size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment

    -         if ( (intptr_t)storage == 0 ) {
    +         if ( !storage ) {
    +                 __cfaabi_dbg_print_safe("Kernel : Creating stack of size %zu for stack obj %p\n", cxtSize + size + 8, this);
    +
                      userStack = false;
                      size = libCeiling( storageSize, 16 );
                      // use malloc/memalign because "new" raises an exception for out-of-memory

                      // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment
                      __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) );
                      __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) );

                      __cfaabi_dbg_debug_do(
                              if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) {
                                      abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) );
                              } // if
                      );

                      if ( (intptr_t)storage == 0 ) {
                              abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size );
                      } // if

                      __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize );
                      __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment

              } else {
    -                 assertf( ((size_t)storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
    +                 __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%u bytes)\n", this, storage, storageSize);
    +
    +                 assertf( ((size_t)storage & (libAlign() - 1)) == 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() );
                      userStack = true;
                      size = storageSize - cxtSize;

                      if ( size % 16 != 0u ) size -= 8;

                      limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment
              } // if
              assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );

              base = (char *)limit + size;
              context = base;
              top = (char *)context + cxtSize;
      }

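create_stack lays the storage out bottom-up as limit .. base = limit + size, with the machine context at base and top = context + cxtSize, rounding sizes and addresses with libCeiling. The rounding helper is ordinary power-of-two ceiling arithmetic; in C:

      #include <stdint.h>

      /* round n up to a multiple of align (align must be a power of two),
         as libCeiling( n, 8 ) and libCeiling( n, 16 ) are used above */
      static inline uintptr_t lib_ceiling( uintptr_t n, uintptr_t align ) {
              return ( n + align - 1 ) & ~( align - 1 );
      }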
     
    …
      // is not inline (We can't inline Cforall in C)
      extern "C" {
              void __suspend_internal(void) {
                      suspend();
              }

              void __leave_coroutine(void) {
                      coroutine_desc * src = TL_GET( this_coroutine ); // optimization

                      assertf( src->starter != 0,
                              "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
                              "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
                              src->name, src );
                      assertf( src->starter->state != Halted,
                              "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
                              "Possible cause is terminated coroutine's main routine has already returned.",
                              src->name, src, src->starter->name, src->starter );

                      CoroutineCtxSwitch( src, src->starter );
              }
      }

  • src/libcfa/concurrency/invoke.c

    (diff rf6f0cca3 → rff29f08)

    …
              // Fetch the thread handle from the user defined thread structure
              struct thread_desc* thrd = get_thread( this );
    +         thrd->self_cor.last = NULL;

              // Officially start the thread by enabling preemption
  • src/libcfa/concurrency/invoke.h

    (diff rf6f0cca3 → rff29f08)

    …
      #include "bits/locks.h"

    - #define TL_GET( member ) kernelThreadData.member
    - #define TL_SET( member, value ) kernelThreadData.member = value;
    + #define TL_GET( member ) kernelTLS.member
    + #define TL_SET( member, value ) kernelTLS.member = value;

      #ifdef __cforall
    …
                              volatile bool in_progress;
                      } preemption_state;
    -         } kernelThreadData;
    +         } kernelTLS;
      }

      static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
    - static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread ); }
    - static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
    + static inline struct thread_desc    * volatile active_thread   () { return TL_GET( this_thread    ); }
    + static inline struct processor      * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
      #endif

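Because TL_GET splices its argument after "kernelTLS.", nested member expressions now work as arguments, which is why call sites elsewhere in this changeset can move from TL_GET( preemption_state ).enabled to TL_GET( preemption_state.enabled ). A reduced C illustration:

      struct KernelTLS { struct { int enabled; } preemption_state; };
      static _Thread_local struct KernelTLS kernelTLS;
      #define TL_GET( member ) kernelTLS.member

      int example( void ) {
              /* expands to kernelTLS.preemption_state.enabled */
              return TL_GET( preemption_state.enabled );
      }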
     
    …
              struct monitor_desc *  self_mon_p;

    +         // pointer to the cluster on which the thread is running
    +         struct cluster * curr_cluster;
    +
              // monitors currently held by this thread
              struct __monitor_group_t monitors;
    …
              struct thread_desc * next;

    -         __cfaabi_dbg_debug_do(
    -                 // intrusive link field for debugging
    -                 struct thread_desc * dbg_next;
    -                 struct thread_desc * dbg_prev;
    -         )
    +         struct {
    +                 struct thread_desc * next;
    +                 struct thread_desc * prev;
    +         } node;
      };

    …
              static inline thread_desc * & get_next( thread_desc & this ) {
                      return this.next;
    +         }
    +
    +         static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
    +                 return this.node.[next, prev];
              }

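The debug-only dbg_next/dbg_prev pair becomes an unconditional intrusive node, so putting a thread on the global list never allocates, and __get exposes the embedded links to the generic list code. A minimal C sketch of such an intrusive push:

      #include <stddef.h>

      struct thread_desc {
              struct thread_desc * next;                            /* ready-queue link */
              struct { struct thread_desc * next, * prev; } node;   /* global-list links */
      };

      static void push_front( struct thread_desc ** head, struct thread_desc * t ) {
              t->node.prev = NULL;
              t->node.next = *head;
              if ( *head ) (*head)->node.prev = t;
              *head = t;
      }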
  • src/libcfa/concurrency/kernel

    (diff rf6f0cca3 → rff29f08)

    …

      //-----------------------------------------------------------------------------
    - // Cluster
    - struct cluster {
    -         // Ready queue locks
    -         __spinlock_t ready_queue_lock;
    + // Processor
    + extern struct cluster * mainCluster;

    -         // Ready queue for threads
    -         __queue_t(thread_desc) ready_queue;
    -
    -         // Preemption rate on this cluster
    -         Duration preemption_rate;
    - };
    -
    - extern Duration default_preemption();
    -
    - void ?{} (cluster & this);
    - void ^?{}(cluster & this);
    -
    - //-----------------------------------------------------------------------------
    - // Processor
      enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule };

    …

              // Cluster from which to get threads
    -         cluster * cltr;
    +         struct cluster * cltr;
    +
    +         // Name of the processor
    +         const char * name;

              // Handle to pthreads
    …
              bool pending_preemption;

    +         // Idle lock
    +
    +         // Link lists fields
    +         struct {
    +                 struct processor * next;
    +                 struct processor * prev;
    +         } node;
    +
      #ifdef __CFA_DEBUG__
              // Last function to enable preemption on this processor
    …
      };

    - void  ?{}(processor & this);
    - void  ?{}(processor & this, cluster * cltr);
    + void  ?{}(processor & this, const char * name, struct cluster & cltr);
      void ^?{}(processor & this);
    +
    + static inline void  ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster}; }
    + static inline void  ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
    + static inline void  ?{}(processor & this, const char * name)     { this{name, *mainCluster }; }
    +
    + static inline [processor *&, processor *& ] __get( processor & this ) {
    +         return this.node.[next, prev];
    + }
    +
    + //-----------------------------------------------------------------------------
    + // Cluster
    + struct cluster {
    +         // Ready queue locks
    +         __spinlock_t ready_queue_lock;
    +
    +         // Ready queue for threads
    +         __queue_t(thread_desc) ready_queue;
    +
    +         // Name of the cluster
    +         const char * name;
    +
    +         // Preemption rate on this cluster
    +         Duration preemption_rate;
    +
    +         // List of processors
    +         __spinlock_t proc_list_lock;
    +         __dllist_t(struct processor) procs;
    +         __dllist_t(struct processor) idles;
    +
    +         // Link lists fields
    +         struct {
    +                 cluster * next;
    +                 cluster * prev;
    +         } node;
    + };
    + extern Duration default_preemption();
    +
    + void ?{} (cluster & this, const char * name, Duration preemption_rate);
    + void ^?{}(cluster & this);
    +
    + static inline void ?{} (cluster & this)                           { this{"Anonymous Cluster", default_preemption()}; }
    + static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; }
    + static inline void ?{} (cluster & this, const char * name)        { this{name, default_preemption()}; }
    +
    + static inline [cluster *&, cluster *& ] __get( cluster & this ) {
    +         return this.node.[next, prev];
    + }

      // Local Variables: //
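The declaration order flips because the dependencies flipped: processor only needs an incomplete struct cluster * for its cltr field, while cluster now embeds __dllist_t(struct processor) list heads, so processor must be fully declared first. The same constraint in plain C:

      struct cluster;                       /* forward declaration suffices here */

      struct processor {
              struct cluster * cltr;        /* pointer to incomplete type: fine */
              struct { struct processor * next, * prev; } node;
      };

      struct cluster {
              struct processor * procs;     /* stand-in for __dllist_t(struct processor) */
              struct processor * idles;
      };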
  • src/libcfa/concurrency/kernel.c

    (diff rf6f0cca3 → rff29f08)

    …
      KERNEL_STORAGE(cluster,           mainCluster);
      KERNEL_STORAGE(processor,         mainProcessor);
    - KERNEL_STORAGE(processorCtx_t,    mainProcessorCtx);
      KERNEL_STORAGE(thread_desc,       mainThread);
      KERNEL_STORAGE(machine_context_t, mainThreadCtx);

    - cluster *    mainCluster;
    - processor mainProcessor;
    + cluster     * mainCluster;
    + processor   * mainProcessor;
      thread_desc * mainThread;
    +
    + struct { __dllist_t(thread_desc) list; __spinlock_t lock; } global_threads ;
    + struct { __dllist_t(cluster    ) list; __spinlock_t lock; } global_clusters;

      //-----------------------------------------------------------------------------
    …
      // volatile thread_local unsigned short disable_preempt_count = 1;

    - thread_local struct KernelThreadData kernelThreadData = {
    + thread_local struct KernelThreadData kernelTLS = {
              NULL,
              NULL,
    …

      //-----------------------------------------------------------------------------
    - // Main thread construction
    + // Struct to steal stack
      struct current_stack_info_t {
              machine_context_t ctx;
    …
      }

    + //-----------------------------------------------------------------------------
    + // Main thread construction
      void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) {
              size      = info->size;
    …
              self_cor{ info };
              curr_cor = &self_cor;
    +         curr_cluster = mainCluster;
              self_mon.owner = &this;
              self_mon.recursion = 1;
              self_mon_p = &self_mon;
              next = NULL;
    -         __cfaabi_dbg_debug_do(
    -                 dbg_next = NULL;
    -                 dbg_prev = NULL;
    -                 __cfaabi_dbg_thread_register(&this);
    -         )
    +
    +         node.next = NULL;
    +         node.prev = NULL;
    +         doregister(this);

              monitors{ &self_mon_p, 1, (fptr_t)0 };
     
    …
      //-----------------------------------------------------------------------------
      // Processor coroutine
    - void ?{}(processorCtx_t & this) {}
    -
    - // Construct the processor context of the main processor
    - void ?{}(processorCtx_t & this, processor * proc) {
    -         (this.__cor){ "Processor" };
    -         this.__cor.starter = NULL;
    -         this.proc = proc;
    + void ?{}(processorCtx_t & this) {
    +
      }

    …
      }

    - void ?{}(processor & this) {
    -         this{ mainCluster };
    - }
    -
    - void ?{}(processor & this, cluster * cltr) with( this ) {
    -         this.cltr = cltr;
    + void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
    +         this.name = name;
    +         this.cltr = &cltr;
              terminated{ 0 };
              do_terminate = false;
    …
      }

    - void ?{}(processor & this, cluster * cltr, processorCtx_t & runner) with( this ) {
    -         this.cltr = cltr;
    -         terminated{ 0 };
    -         do_terminate = false;
    -         preemption_alarm = NULL;
    -         pending_preemption = false;
    -         kernel_thread = pthread_self();
    -         runner.proc = &this;
    -
    -         __cfaabi_dbg_print_safe("Kernel : constructing main processor context %p\n", &runner);
    -         runner{ &this };
    - }
    -
      void ^?{}(processor & this) with( this ){
              if( ! do_terminate ) {
    …
                      terminate(&this);
                      verify(this.do_terminate);
    -                 verify(TL_GET( this_processor ) != &this);
    +                 verify( kernelTLS.this_processor != &this);
                      P( terminated );
    -                 verify(TL_GET( this_processor ) != &this);
    +                 verify( kernelTLS.this_processor != &this);
                      pthread_join( kernel_thread, NULL );
              }
      }

    - void ?{}(cluster & this) with( this ) {
    + void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
    +         this.name = name;
    +         this.preemption_rate = preemption_rate;
              ready_queue{};
              ready_queue_lock{};

    -         preemption_rate = default_preemption();
    +         procs{ __get };
    +         idles{ __get };
    +
    +         doregister(this);
      }

      void ^?{}(cluster & this) {
    -
    +         unregister(this);
      }

     
    …
              __cfaabi_dbg_print_safe("Kernel : core %p starting\n", this);

    +         doregister(this->cltr, this);
    +
              {
                      // Setup preemption data
    …
                              if(readyThread)
                              {
    -                                 verify( ! TL_GET( preemption_state ).enabled );
    +                                 verify( ! kernelTLS.preemption_state.enabled );

                                      runThread(this, readyThread);

    -                                 verify( ! TL_GET( preemption_state ).enabled );
    +                                 verify( ! kernelTLS.preemption_state.enabled );

                                      //Some actions need to be taken from the kernel
    …
              }

    +         unregister(this->cltr, this);
    +
              V( this->terminated );

    …
      }

    + // KERNEL ONLY
      // runThread runs a thread by context switching
      // from the processor coroutine to the target thread
    …
              coroutine_desc * thrd_cor = dst->curr_cor;

    -         //Reset the terminating actions here
    +         // Reset the terminating actions here
              this->finish.action_code = No_Action;

    -         //Update global state
    -         TL_SET( this_thread, dst );
    +         // Update global state
    +         kernelTLS.this_thread = dst;

              // Context Switch to the thread
    …
      }

    + // KERNEL_ONLY
      void returnToKernel() {
    -         coroutine_desc * proc_cor = get_coroutine(TL_GET( this_processor )->runner);
    -         coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );
    +         coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
    +         coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine;
              ThreadCtxSwitch(thrd_cor, proc_cor);
      }

    + // KERNEL_ONLY
      // Once a thread has finished running, some of
      // its final actions must be executed from the kernel
      void finishRunning(processor * this) with( this->finish ) {
              if( action_code == Release ) {
    -                 verify( ! TL_GET( preemption_state ).enabled );
    +                 verify( ! kernelTLS.preemption_state.enabled );
                      unlock( *lock );
              }
    …
              }
              else if( action_code == Release_Schedule ) {
    -                 verify( ! TL_GET( preemption_state ).enabled );
    +                 verify( ! kernelTLS.preemption_state.enabled );
                      unlock( *lock );
                      ScheduleThread( thrd );
              }
              else if( action_code == Release_Multi ) {
    -                 verify( ! TL_GET( preemption_state ).enabled );
    +                 verify( ! kernelTLS.preemption_state.enabled );
                      for(int i = 0; i < lock_count; i++) {
                              unlock( *locks[i] );
    …
      }

    + // KERNEL_ONLY
      // Context invoker for processors
      // This is the entry point for processors (kernel threads)
    …
      void * CtxInvokeProcessor(void * arg) {
              processor * proc = (processor *) arg;
    -         TL_SET( this_processor, proc );
    -         TL_SET( this_coroutine, NULL );
    -         TL_SET( this_thread, NULL );
    -         TL_GET( preemption_state ).enabled = false;
    -         TL_GET( preemption_state ).disable_count = 1;
    +         kernelTLS.this_processor = proc;
    +         kernelTLS.this_coroutine = NULL;
    +         kernelTLS.this_thread    = NULL;
    +         kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
              // SKULLDUGGERY: We want to create a context for the processor coroutine
              // which is needed for the 2-step context switch. However, there is no reason
    …

              //Set global state
    -         TL_SET( this_coroutine, get_coroutine(proc->runner) );
    -         TL_SET( this_thread, NULL );
    +         kernelTLS.this_coroutine = get_coroutine(proc->runner);
    +         kernelTLS.this_thread    = NULL;

              //We now have a proper context from which to schedule threads
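The one CFA-specific line in this hunk is the tuple assignment to two members at once; it unpacks to two ordinary stores, as in this C equivalent (struct shape abbreviated):

      #include <stdbool.h>

      static struct { struct { bool enabled; int disable_count; } preemption_state; } kernelTLS_sketch;

      void boot_tls( void ) {
              /* kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; */
              kernelTLS_sketch.preemption_state.enabled       = false;
              kernelTLS_sketch.preemption_state.disable_count = 1;
      }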
     
    …
      }

    + // KERNEL_ONLY
      void kernel_first_resume(processor * this) {
    -         coroutine_desc * src = TL_GET( this_coroutine );
    +         coroutine_desc * src = kernelTLS.this_coroutine;
              coroutine_desc * dst = get_coroutine(this->runner);

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              create_stack(&dst->stack, dst->stack.size);
              CtxStart(&this->runner, CtxInvokeCoroutine);

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              dst->last = src;
    …

              // set new coroutine that task is executing
    -         TL_SET( this_coroutine, dst );
    +         kernelTLS.this_coroutine = dst;

              // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch.
    …
              src->state = Active;

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
      }

      //-----------------------------------------------------------------------------
      // Scheduler routines
    +
    + // KERNEL ONLY
      void ScheduleThread( thread_desc * thrd ) {
    -         // if( ! thrd ) return;
              verify( thrd );
              verify( thrd->self_cor.state != Halted );

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

    -         with( *TL_GET( this_processor )->cltr ) {
    +         with( *thrd->curr_cluster ) {
                      lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
                      append( ready_queue, thrd );
    …
              }

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
      }

    + // KERNEL ONLY
      thread_desc * nextThread(cluster * this) with( *this ) {
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              lock( ready_queue_lock __cfaabi_dbg_ctx2 );
              thread_desc * head = pop_head( ready_queue );
              unlock( ready_queue_lock );
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              return head;
      }
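The substantive change in ScheduleThread is the routing rule: a thread is queued on the cluster it belongs to (thrd->curr_cluster) rather than on whichever cluster the current processor runs. A C sketch of the locked append, with lock/append/unlock as assumed helpers:

      struct cluster;
      struct thread_desc { struct cluster * curr_cluster; struct thread_desc * next; };
      struct cluster { int ready_queue_lock; struct thread_desc * ready_queue; };

      extern void lock( int * l );
      extern void unlock( int * l );
      extern void append( struct thread_desc ** q, struct thread_desc * t );

      void schedule_sketch( struct thread_desc * thrd ) {
              struct cluster * cltr = thrd->curr_cluster;   /* the thread's own cluster */
              lock  ( &cltr->ready_queue_lock );
              append( &cltr->ready_queue, thrd );
              unlock( &cltr->ready_queue_lock );
      }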
     
    …
      void BlockInternal() {
              disable_interrupts();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              returnToKernel();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              enable_interrupts( __cfaabi_dbg_ctx );
      }
    …
      void BlockInternal( __spinlock_t * lock ) {
              disable_interrupts();
    -         TL_GET( this_processor )->finish.action_code = Release;
    -         TL_GET( this_processor )->finish.lock        = lock;
    +         with( *kernelTLS.this_processor ) {
    +                 finish.action_code = Release;
    +                 finish.lock        = lock;
    +         }

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              returnToKernel();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              enable_interrupts( __cfaabi_dbg_ctx );
    …
      void BlockInternal( thread_desc * thrd ) {
              disable_interrupts();
    -         TL_GET( this_processor )->finish.action_code = Schedule;
    -         TL_GET( this_processor )->finish.thrd        = thrd;
    +         with( * kernelTLS.this_processor ) {
    +                 finish.action_code = Schedule;
    +                 finish.thrd        = thrd;
    +         }

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              returnToKernel();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              enable_interrupts( __cfaabi_dbg_ctx );
    …
              assert(thrd);
              disable_interrupts();
    -         TL_GET( this_processor )->finish.action_code = Release_Schedule;
    -         TL_GET( this_processor )->finish.lock        = lock;
    -         TL_GET( this_processor )->finish.thrd        = thrd;
    +         with( * kernelTLS.this_processor ) {
    +                 finish.action_code = Release_Schedule;
    +                 finish.lock        = lock;
    +                 finish.thrd        = thrd;
    +         }

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              returnToKernel();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              enable_interrupts( __cfaabi_dbg_ctx );
    …
      void BlockInternal(__spinlock_t * locks [], unsigned short count) {
              disable_interrupts();
    -         TL_GET( this_processor )->finish.action_code = Release_Multi;
    -         TL_GET( this_processor )->finish.locks       = locks;
    -         TL_GET( this_processor )->finish.lock_count  = count;
    +         with( * kernelTLS.this_processor ) {
    +                 finish.action_code = Release_Multi;
    +                 finish.locks       = locks;
    +                 finish.lock_count  = count;
    +         }

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              returnToKernel();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              enable_interrupts( __cfaabi_dbg_ctx );
    …
      void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
              disable_interrupts();
    -         TL_GET( this_processor )->finish.action_code = Release_Multi_Schedule;
    -         TL_GET( this_processor )->finish.locks       = locks;
    -         TL_GET( this_processor )->finish.lock_count  = lock_count;
    -         TL_GET( this_processor )->finish.thrds       = thrds;
    -         TL_GET( this_processor )->finish.thrd_count  = thrd_count;
    +         with( *kernelTLS.this_processor ) {
    +                 finish.action_code = Release_Multi_Schedule;
    +                 finish.locks       = locks;
    +                 finish.lock_count  = lock_count;
    +                 finish.thrds       = thrds;
    +                 finish.thrd_count  = thrd_count;
    +         }

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              returnToKernel();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              enable_interrupts( __cfaabi_dbg_ctx );
      }

    + // KERNEL ONLY
      void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
    -         verify( ! TL_GET( preemption_state ).enabled );
    -         TL_GET( this_processor )->finish.action_code = thrd ? Release_Schedule : Release;
    -         TL_GET( this_processor )->finish.lock        = lock;
    -         TL_GET( this_processor )->finish.thrd        = thrd;
    +         verify( ! kernelTLS.preemption_state.enabled );
    +         with( * kernelTLS.this_processor ) {
    +                 finish.action_code = thrd ? Release_Schedule : Release;
    +                 finish.lock        = lock;
    +                 finish.thrd        = thrd;
    +         }

              returnToKernel();
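Each with( *kernelTLS.this_processor ) { finish.x = ...; } block above is shorthand for qualifying every field through a single pointer read, i.e. in plain C:

      struct __spinlock_t;
      enum FinishOpCode { No_Action, Release };
      struct processor {
              struct { enum FinishOpCode action_code; struct __spinlock_t * lock; } finish;
      };

      void block_internal_sketch( struct processor * proc, struct __spinlock_t * lock ) {
              /* with( *proc ) { finish.action_code = Release; finish.lock = lock; } */
              proc->finish.action_code = Release;
              proc->finish.lock        = lock;
      }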
     
    …
      // Kernel boot procedures
      void kernel_startup(void) {
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              __cfaabi_dbg_print_safe("Kernel : Starting\n");
    +
    +         global_threads. list{ __get };
    +         global_threads. lock{};
    +         global_clusters.list{ __get };
    +         global_clusters.lock{};
    +
    +         // Initialize the main cluster
    +         mainCluster = (cluster *)&storage_mainCluster;
    +         (*mainCluster){"Main Cluster"};
    +
    +         __cfaabi_dbg_print_safe("Kernel : Main cluster ready\n");

              // Start by initializing the main thread
    …
              __cfaabi_dbg_print_safe("Kernel : Main thread ready\n");

    -         // Initialize the main cluster
    -         mainCluster = (cluster *)&storage_mainCluster;
    -         (*mainCluster){};
    -
    -         __cfaabi_dbg_print_safe("Kernel : main cluster ready\n");
    +
    +         // Construct the processor context of the main processor
    +         void ?{}(processorCtx_t & this, processor * proc) {
    +                 (this.__cor){ "Processor" };
    +                 this.__cor.starter = NULL;
    +                 this.proc = proc;
    +         }
    +
    +         void ?{}(processor & this) with( this ) {
    +                 name = "Main Processor";
    +                 cltr = mainCluster;
    +                 terminated{ 0 };
    +                 do_terminate = false;
    +                 preemption_alarm = NULL;
    +                 pending_preemption = false;
    +                 kernel_thread = pthread_self();
    +
    +                 runner{ &this };
    +                 __cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner);
    +         }

              // Initialize the main processor and the main processor ctx
              // (the coroutine that contains the processing control flow)
              mainProcessor = (processor *)&storage_mainProcessor;
    -         (*mainProcessor){ mainCluster, *(processorCtx_t *)&storage_mainProcessorCtx };
    +         (*mainProcessor){};

              //initialize the global state variables
    -         TL_SET( this_processor, mainProcessor );
    -         TL_SET( this_thread, mainThread );
    -         TL_SET( this_coroutine, &mainThread->self_cor );
    +         kernelTLS.this_processor = mainProcessor;
    +         kernelTLS.this_thread    = mainThread;
    +         kernelTLS.this_coroutine = &mainThread->self_cor;

              // Enable preemption
    …
              // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
              // mainThread is on the ready queue when this call is made.
    -         kernel_first_resume( TL_GET( this_processor ) );
    +         kernel_first_resume( kernelTLS.this_processor );

    …
              __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n");

    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );
              enable_interrupts( __cfaabi_dbg_ctx );
    -         verify( TL_GET( preemption_state ).enabled );
    +         verify( TL_GET( preemption_state.enabled ) );
      }

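Startup ordering matters here: the main cluster is now constructed before the main thread because the thread constructor records curr_cluster = mainCluster, and the main processor comes last. A hypothetical C spelling of the sequence (ctor helpers assumed):

      struct cluster;  struct thread_desc;
      extern struct cluster     * mainCluster;
      extern struct thread_desc * mainThread;
      extern void cluster_ctor( struct cluster *, const char * name );
      extern void thread_ctor ( struct thread_desc * );   /* reads mainCluster */

      void startup_sketch( char * cluster_storage, char * thread_storage ) {
              mainCluster = (struct cluster *)cluster_storage;
              cluster_ctor( mainCluster, "Main Cluster" );
              mainThread = (struct thread_desc *)thread_storage;
              thread_ctor( mainThread );          /* safe: mainCluster already exists */
      }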
     
    …
              __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

    -         verify( TL_GET( preemption_state ).enabled );
    +         verify( TL_GET( preemption_state.enabled ) );
              disable_interrupts();
    -         verify( ! TL_GET( preemption_state ).enabled );
    +         verify( ! kernelTLS.preemption_state.enabled );

              // SKULLDUGGERY: Notify the mainProcessor it needs to terminates.
    …

      //=============================================================================================
    + // Kernel Quiescing
    + //=============================================================================================
    +
    + // void halt(processor * this) with( this ) {
    + //      pthread_mutex_lock( &idle.lock );
    + //
    + //      // SKULLDUGGERY: Even if spurious wake-up is a thing
    + //      // spuriously waking up a kernel thread is not a big deal
    + //      // if it is very rare.
    + //      pthread_cond_wait( &idle.cond, &idle.lock);
    + //      pthread_mutex_unlock( &idle.lock );
    + // }
    +
    + // void wake(processor * this) with( this ) {
    + //      pthread_mutex_lock  (&idle.lock);
    + //      pthread_cond_signal (&idle.cond);
    + //      pthread_mutex_unlock(&idle.lock);
    + // }
    +
    + //=============================================================================================
      // Unexpected Terminating logic
      //=============================================================================================
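The commented-out halt()/wake() pair sketches the planned idle protocol: an idle processor parks on a condition variable until work arrives, tolerating rare spurious wake-ups. Filled out with the standard pthread calls it would resemble:

      #include <pthread.h>

      struct idle_lock { pthread_mutex_t lock; pthread_cond_t cond; };

      static void halt_sketch( struct idle_lock * idle ) {
              pthread_mutex_lock( &idle->lock );
              /* a spurious wake-up only costs an extra scheduling pass, so no predicate loop */
              pthread_cond_wait( &idle->cond, &idle->lock );
              pthread_mutex_unlock( &idle->lock );
      }

      static void wake_sketch( struct idle_lock * idle ) {
              pthread_mutex_lock  ( &idle->lock );
              pthread_cond_signal ( &idle->cond );
              pthread_mutex_unlock( &idle->lock );
      }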
     
    …

      static __spinlock_t kernel_abort_lock;
    - static __spinlock_t kernel_debug_lock;
      static bool kernel_abort_called = false;

    - void * kernel_abort    (void) __attribute__ ((__nothrow__)) {
    + void * kernel_abort(void) __attribute__ ((__nothrow__)) {
              // abort cannot be recursively entered by the same or different processors because all signal handlers return when
              // the globalAbort flag is true.
    …

              // first task to abort ?
    -         if ( ! kernel_abort_called ) {                  // not first task to abort ?
    -                 kernel_abort_called = true;
    -                 unlock( kernel_abort_lock );
    -         }
    -         else {
    +         if ( kernel_abort_called ) {                    // not first task to abort ?
                      unlock( kernel_abort_lock );

                      sigset_t mask;
                      sigemptyset( &mask );
                      sigaddset( &mask, SIGALRM );            // block SIGALRM signals
    -                 sigaddset( &mask, SIGUSR1 );            // block SIGUSR1 signals
                      sigsuspend( &mask );                    // block the processor to prevent further damage during abort
                      _exit( EXIT_FAILURE );                  // if processor unblocks before it is killed, terminate it
              }
    +         else {
    +                 kernel_abort_called = true;
    +                 unlock( kernel_abort_lock );
    +         }

    -         return TL_GET( this_thread );
    +         return kernelTLS.this_thread;
      }

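The restructured kernel_abort implements an abort-once protocol: the first task through the lock performs the abort, and every later task blocks SIGALRM and parks in sigsuspend until it is killed. The shape of that protocol in C (locking elided):

      #include <signal.h>
      #include <stdbool.h>
      #include <stdlib.h>

      static bool abort_called;   /* protected by the abort lock, elided here */

      void abort_once_sketch( void ) {
              if ( abort_called ) {                 /* not the first task to abort */
                      sigset_t mask;
                      sigemptyset( &mask );
                      sigaddset( &mask, SIGALRM );  /* block SIGALRM */
                      sigsuspend( &mask );          /* park until killed */
                      _exit( EXIT_FAILURE );        /* terminate if unblocked first */
              }
              abort_called = true;                  /* first caller carries out the abort */
      }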
     
    …
              thread_desc * thrd = kernel_data;

    -         int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->self_cor.name, thrd );
    -         __cfaabi_dbg_bits_write( abort_text, len );
    -
    -         if ( get_coroutine(thrd) != TL_GET( this_coroutine ) ) {
    -                 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) );
    -                 __cfaabi_dbg_bits_write( abort_text, len );
    -         }
    -         else {
    -                 __cfaabi_dbg_bits_write( ".\n", 2 );
    -         }
    +         if(thrd) {
    +                 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
    +                 __cfaabi_dbg_bits_write( abort_text, len );
    +
    +                 if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) {
    +                         len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine );
    +                         __cfaabi_dbg_bits_write( abort_text, len );
    +                 }
    +                 else {
    +                         __cfaabi_dbg_bits_write( ".\n", 2 );
    +                 }
    +         }
    +         else {
    +                 int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
    +         }
      }

      int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
    -         return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;
    +         return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2;
      }

    + static __spinlock_t kernel_debug_lock;

      extern "C" {
     
    …
              if ( count < 0 ) {
                      // queue current task
    -                 append( waiting, (thread_desc *)TL_GET( this_thread ) );
    +                 append( waiting, kernelTLS.this_thread );

                      // atomically release spin lock and block
    …

      //-----------------------------------------------------------------------------
    + // Global Queues
    + void doregister( thread_desc & thrd ) {
    +         // lock      ( global_thread.lock );
    +         // push_front( global_thread.list, thrd );
    +         // unlock    ( global_thread.lock );
    + }
    +
    + void unregister( thread_desc & thrd ) {
    +         // lock  ( global_thread.lock );
    +         // remove( global_thread.list, thrd );
    +         // unlock( global_thread.lock );
    + }
    +
    + void doregister( cluster     & cltr ) {
    +         // lock      ( global_cluster.lock );
    +         // push_front( global_cluster.list, cltr );
    +         // unlock    ( global_cluster.lock );
    + }
    +
    + void unregister( cluster     & cltr ) {
    +         // lock  ( global_cluster.lock );
    +         // remove( global_cluster.list, cltr );
    +         // unlock( global_cluster.lock );
    + }
    +
    + void doregister( cluster * cltr, processor * proc ) {
    +         // lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
    +         // push_front(cltr->procs, *proc);
    +         // unlock    (cltr->proc_list_lock);
    + }
    +
    + void unregister( cluster * cltr, processor * proc ) {
    +         // lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
    +         // remove(cltr->procs, *proc );
    +         // unlock(cltr->proc_list_lock);
    + }
    +
    + //-----------------------------------------------------------------------------
      // Debug
      __cfaabi_dbg_debug_do(
    -         struct {
    -                 thread_desc * tail;
    -         } __cfaabi_dbg_thread_list = { NULL };
    -
    -         void __cfaabi_dbg_thread_register( thread_desc * thrd ) {
    -                 if( !__cfaabi_dbg_thread_list.tail ) {
    -                         __cfaabi_dbg_thread_list.tail = thrd;
    -                         return;
    -                 }
    -                 __cfaabi_dbg_thread_list.tail->dbg_next = thrd;
    -                 thrd->dbg_prev = __cfaabi_dbg_thread_list.tail;
    -                 __cfaabi_dbg_thread_list.tail = thrd;
    -         }
    -
    -         void __cfaabi_dbg_thread_unregister( thread_desc * thrd ) {
    -                 thread_desc * prev = thrd->dbg_prev;
    -                 thread_desc * next = thrd->dbg_next;
    -
    -                 if( next ) { next->dbg_prev = prev; }
    -                 else       {
    -                         assert( __cfaabi_dbg_thread_list.tail == thrd );
    -                         __cfaabi_dbg_thread_list.tail = prev;
    -                 }
    -
    -                 if( prev ) { prev->dbg_next = next; }
    -
    -                 thrd->dbg_prev = NULL;
    -                 thrd->dbg_next = NULL;
    +         void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
    +                 this.prev_name = prev_name;
    +                 this.prev_thrd = kernelTLS.this_thread;
              }
      )
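The doregister/unregister bodies land commented out; once enabled they would follow the usual lock-protected intrusive-list pattern, roughly (helpers assumed):

      struct processor;
      struct cluster { int proc_list_lock; struct processor * procs; };

      extern void lock( int * l );
      extern void unlock( int * l );
      extern void push_front( struct processor ** list, struct processor * p );

      void doregister_sketch( struct cluster * cltr, struct processor * proc ) {
              lock( &cltr->proc_list_lock );
              push_front( &cltr->procs, proc );   /* intrusive: uses proc's node links */
              unlock( &cltr->proc_list_lock );
      }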
  • src/libcfa/concurrency/kernel_private.h

    (diff rf6f0cca3 → rff29f08)

    …
      #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]

    +
    + void doregister( struct thread_desc & thrd );
    + void unregister( struct thread_desc & thrd );
    +
    + void doregister( struct cluster     & cltr );
    + void unregister( struct cluster     & cltr );
    +
    + void doregister( struct cluster * cltr, struct processor * proc );
    + void unregister( struct cluster * cltr, struct processor * proc );
    +
      // Local Variables: //
      // mode: c //
  • src/libcfa/concurrency/monitor.c

    rf6f0cca3 rff29f08  
    8585                // Lock the monitor spinlock
    8686                lock( this->lock __cfaabi_dbg_ctx2 );
    87                 thread_desc * thrd = TL_GET( this_thread );
     87                // Interrupts disable inside critical section
     88                thread_desc * thrd = kernelTLS.this_thread;
    8889
    8990                __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner);
     
    134135                // Lock the monitor spinlock
    135136                lock( this->lock __cfaabi_dbg_ctx2 );
    136                 thread_desc * thrd = TL_GET( this_thread );
      137                // Interrupts are disabled inside the critical section
     138                thread_desc * thrd = kernelTLS.this_thread;
    137139
    138140                __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner);
     
    168170
    169171                        // Create the node specific to this wait operation
    170                         wait_ctx_primed( TL_GET( this_thread ), 0 )
     172                        wait_ctx_primed( thrd, 0 )
    171173
     172174                        // Someone else has the monitor; wait for it to finish and then run
     
    179181                        __cfaabi_dbg_print_safe( "Kernel :  blocking \n" );
    180182
    181                         wait_ctx( TL_GET( this_thread ), 0 )
     183                        wait_ctx( thrd, 0 )
    182184                        this->dtor_node = &waiter;
    183185
     
    199201                lock( this->lock __cfaabi_dbg_ctx2 );
    200202
    201                 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);
    202 
    203                 verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );
     203                __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);
     204
     205                verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
    204206
    205207                // Leaving a recursion level, decrement the counter
     
    289291// Sorts monitors before entering
    290292void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) {
     293        thread_desc * thrd = TL_GET( this_thread );
     294
    291295        // Store current array
    292296        this.m = m;
     
    297301
    298302        // Save previous thread context
    299         this.prev = TL_GET( this_thread )->monitors;
     303        this.prev = thrd->monitors;
    300304
    301305        // Update thread context (needed for conditions)
    302         (TL_GET( this_thread )->monitors){m, count, func};
     306        (thrd->monitors){m, count, func};
    303307
    304308        // __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count);
     
    328332// Sorts monitors before entering
    329333void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) {
     334        // optimization
     335        thread_desc * thrd = TL_GET( this_thread );
     336
    330337        // Store current array
    331338        this.m = *m;
    332339
    333340        // Save previous thread context
    334         this.prev = TL_GET( this_thread )->monitors;
     341        this.prev = thrd->monitors;
    335342
    336343        // Update thread context (needed for conditions)
    337         (TL_GET( this_thread )->monitors){m, 1, func};
     344        (thrd->monitors){m, 1, func};
    338345
    339346        __enter_monitor_dtor( this.m, func );
     
    473480
    474481        // Create the node specific to this wait operation
    475         wait_ctx_primed( TL_GET( this_thread ), 0 )
     482        wait_ctx_primed( kernelTLS.this_thread, 0 )
    476483
    477484        //save contexts
     
    566573
    567574                                // Create the node specific to this wait operation
    568                                 wait_ctx_primed( TL_GET( this_thread ), 0 );
     575                                wait_ctx_primed( kernelTLS.this_thread, 0 );
    569576
    570577                                // Save monitor states
     
    612619
    613620        // Create the node specific to this wait operation
    614         wait_ctx_primed( TL_GET( this_thread ), 0 );
     621        wait_ctx_primed( kernelTLS.this_thread, 0 );
    615622
    616623        monitor_save;
     
    618625
    619626        for( __lock_size_t i = 0; i < count; i++) {
    620                 verify( monitors[i]->owner == TL_GET( this_thread ) );
     627                verify( monitors[i]->owner == kernelTLS.this_thread );
    621628        }
    622629
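
The recurring change in this file replaces repeated TL_GET( this_thread ) reads with one cached load of kernelTLS.this_thread, reused while the monitor lock is held. A plain-C sketch of the pattern, with __thread standing in for the kernel TLS block (names hypothetical):

    struct thread_desc;
    static __thread struct thread_desc * this_thread;  /* stand-in for kernelTLS.this_thread */

    void owner_checks_sketch( struct thread_desc * owners[], int count ) {
            /* One TLS load; reusing the local is safe while the monitor
               lock is held, since the thread cannot migrate mid-check. */
            struct thread_desc * thrd = this_thread;
            for( int i = 0; i < count; i++ ) {
                    if( owners[i] != thrd ) { /* ownership violation */ }
            }
    }
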
  • src/libcfa/concurrency/preemption.c

    rf6f0cca3 rff29f08  
    149149        // Disable interrupts by incrementing the counter
    150150        void disable_interrupts() {
    151                 TL_GET( preemption_state ).enabled = false;
    152                 __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1;
    153                 TL_GET( preemption_state ).disable_count = new_val;
     154                 verify( new_val < 65_000u );              // If this triggers, someone is disabling interrupts without enabling them
     151                with( kernelTLS.preemption_state ) {
     152                        enabled = false;
     153                        __attribute__((unused)) unsigned short new_val = disable_count + 1;
     154                        disable_count = new_val;
      155                        verify( new_val < 65_000u );              // If this triggers, someone is disabling interrupts without enabling them
     156                }
    155157        }
    156158
     
    158160        // If counter reaches 0, execute any pending CtxSwitch
    159161        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    160                 processor   * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add
    161                 thread_desc * thrd = TL_GET( this_thread );       // Cache the thread now since interrupts can start happening after the atomic add
    162 
    163                 unsigned short prev = TL_GET( preemption_state ).disable_count;
    164                 TL_GET( preemption_state ).disable_count -= 1;
     165                 verify( prev != 0u );                     // If this triggers, someone is enabling already-enabled interrupts
     166 
     167                 // Check if we need to preempt the thread because an interrupt was missed
    168                 if( prev == 1 ) {
    169                         TL_GET( preemption_state ).enabled = true;
    170                         if( proc->pending_preemption ) {
    171                                 proc->pending_preemption = false;
    172                                 BlockInternal( thrd );
     162                processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
     163                thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic add
     164
     165                with( kernelTLS.preemption_state ){
     166                        unsigned short prev = disable_count;
     167                        disable_count -= 1;
      168                        verify( prev != 0u );                     // If this triggers, someone is enabling already-enabled interrupts
      169
      170                        // Check if we need to preempt the thread because an interrupt was missed
     171                        if( prev == 1 ) {
     172                                enabled = true;
     173                                if( proc->pending_preemption ) {
     174                                        proc->pending_preemption = false;
     175                                        BlockInternal( thrd );
     176                                }
    173177                        }
    174178                }
     
    181185        // Don't execute any pending CtxSwitch even if counter reaches 0
    182186        void enable_interrupts_noPoll() {
    183                 unsigned short prev = TL_GET( preemption_state ).disable_count;
    184                 TL_GET( preemption_state ).disable_count -= 1;
     187                unsigned short prev = kernelTLS.preemption_state.disable_count;
     188                kernelTLS.preemption_state.disable_count -= 1;
     185189        verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, someone is enabling already-enabled interrupts
    186190                if( prev == 1 ) {
    187                         TL_GET( preemption_state ).enabled = true;
     191                        kernelTLS.preemption_state.enabled = true;
    188192                }
    189193        }
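
Both enable paths decrement the same per-thread counter that disable_interrupts increments, so disable/enable pairs nest and preemption becomes deliverable only when the count returns to zero. A plain-C sketch of the counting discipline (limits mirror the hunk; names hypothetical):

    #include <assert.h>
    #include <stdbool.h>

    static __thread struct {
            volatile bool  enabled;
            unsigned short disable_count;
    } preemption_state = { true, 0 };

    static void disable_sketch( void ) {
            preemption_state.enabled = false;
            unsigned short new_val = ++preemption_state.disable_count;
            assert( new_val < 65000u );        /* unmatched disables would overflow */
    }

    static void enable_sketch( void ) {
            unsigned short prev = preemption_state.disable_count--;
            assert( prev != 0u );              /* enabling already-enabled interrupts */
            if( prev == 1 ) {
                    preemption_state.enabled = true;
                    /* outermost level: poll for a pending preemption here */
            }
    }
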
     
    230234}
    231235
    232 
     236// KERNEL ONLY
     233237// Check if a CtxSwitch signal handler should defer
    234238// If true  : preemption is safe
    235239// If false : preemption is unsafe and marked as pending
    236240static inline bool preemption_ready() {
    237         bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe
    238         TL_GET( this_processor )->pending_preemption = !ready;                  // Adjust the pending flag accordingly
     241        // Check if preemption is safe
     242        bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress;
     243
     244        // Adjust the pending flag accordingly
     245        kernelTLS.this_processor->pending_preemption = !ready;
    239246        return ready;
    240247}
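
preemption_ready makes the defer decision inside the signal handler: when the kernel is in a critical section the preemption is not dropped, it is parked on the processor and replayed by enable_interrupts. A plain-C sketch of the decision (names hypothetical):

    #include <stdbool.h>

    static __thread bool pending_preemption;   /* stand-in for this_processor->pending_preemption */

    static bool preemption_ready_sketch( bool enabled, bool in_progress ) {
            bool ready = enabled && !in_progress;  /* unsafe while disabled or re-entrant */
            pending_preemption = !ready;           /* remember the missed preemption */
            return ready;
    }
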
     
    250257
    251258        // Start with preemption disabled until ready
    252         TL_GET( preemption_state ).enabled = false;
    253         TL_GET( preemption_state ).disable_count = 1;
     259        kernelTLS.preemption_state.enabled = false;
     260        kernelTLS.preemption_state.disable_count = 1;
    254261
    255262        // Initialize the event kernel
     
     316323        // before the kernel thread has even started running. When that happens, an interrupt
     317324        // with a null 'this_processor' will be caught; just ignore it.
    318         if(!TL_GET( this_processor )) return;
     325        if(! kernelTLS.this_processor ) return;
    319326
    320327        choose(sfp->si_value.sival_int) {
    321328                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    322                 case PREEMPT_TERMINATE: verify(TL_GET( this_processor )->do_terminate);
     329                case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
    323330                default:
    324331                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    328335        if( !preemption_ready() ) { return; }
    329336
    330         __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread);
    331 
    332         TL_GET( preemption_state ).in_progress = true;  // Sync flag : prevent recursive calls to the signal handler
    333         signal_unblock( SIGUSR1 );                          // We are about to CtxSwitch out of the signal handler, let other handlers in
    334         TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag
     337        __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread );
     338
     339        // Sync flag : prevent recursive calls to the signal handler
     340        kernelTLS.preemption_state.in_progress = true;
     341
     342        // We are about to CtxSwitch out of the signal handler, let other handlers in
     343        signal_unblock( SIGUSR1 );
     344
     345        // TODO: this should go in finish action
     346        // Clear the in progress flag
     347        kernelTLS.preemption_state.in_progress = false;
    335348
    336349        // Preemption can occur here
    337350
    338         BlockInternal( (thread_desc*)TL_GET( this_thread ) ); // Do the actual CtxSwitch
     351        BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch
    339352}
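
The reworked handler unblocks SIGUSR1 before BlockInternal because a signal stays masked for the duration of its handler, and the context switch abandons the handler frame without returning. A plain-C sketch of just that unblocking step, which is what signal_unblock( SIGUSR1 ) presumably performs (names hypothetical):

    #include <pthread.h>
    #include <signal.h>

    /* Re-enable SIGUSR1 delivery for the current thread; without this,
       switching out of the handler would leave the signal blocked until
       this stack frame is eventually resumed and returns. */
    static void unblock_usr1_sketch( void ) {
            sigset_t mask;
            sigemptyset( &mask );
            sigaddset( &mask, SIGUSR1 );
            pthread_sigmask( SIG_UNBLOCK, &mask, NULL );
    }
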
    340353
     
    344357        // Block sigalrms to control when they arrive
    345358        sigset_t mask;
     359        sigfillset(&mask);
     360        if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
     361            abort( "internal error, pthread_sigmask" );
     362        }
     363
    346364        sigemptyset( &mask );
    347365        sigaddset( &mask, SIGALRM );
    348 
    349         if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
    350             abort( "internal error, pthread_sigmask" );
    351         }
    352366
    353367        // Main loop
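
The reorder above blocks the full signal set before building the SIGALRM wait set, closing the window where a stray signal could reach the alarm thread between the two steps. A plain-C sketch of the fixed ordering (wrapper name hypothetical):

    #include <pthread.h>
    #include <signal.h>
    #include <stdlib.h>

    static void alarm_thread_setup_sketch( sigset_t * waitset ) {
            sigset_t mask;
            sigfillset( &mask );                       /* 1. block everything first */
            if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 )
                    abort();

            sigemptyset( waitset );                    /* 2. then build the set the */
            sigaddset( waitset, SIGALRM );             /*    main loop waits on     */
    }
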
     
    400414}
    401415
     416//=============================================================================================
     417// Kernel Signal Debug
     418//=============================================================================================
     419
     420void __cfaabi_check_preemption() {
     421        bool ready = kernelTLS.preemption_state.enabled;
     422        if(!ready) { abort("Preemption should be ready"); }
     423
     424        sigset_t oldset;
     425        int ret;
     426        ret = sigprocmask(0, NULL, &oldset);
     427        if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
     428
     429        ret = sigismember(&oldset, SIGUSR1);
     430        if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
     431
     432        if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
     433}
     434
    402435// Local Variables: //
    403436// mode: c //
  • src/libcfa/concurrency/thread

    rf6f0cca3 rff29f08  
    2020
    2121#include "coroutine"
     22#include "kernel"
    2223#include "monitor"
    2324
    2425//-----------------------------------------------------------------------------
    25 // Coroutine trait
    26 // Anything that implements this trait can be resumed.
    27 // Anything that is resumed is a coroutine.
     26// thread trait
    2827trait is_thread(dtype T) {
    2928      void ^?{}(T& mutex this);
     
    5251}
    5352
    54 //extern thread_local thread_desc * volatile this_thread;
     53extern struct cluster * mainCluster;
    5554
    5655forall( dtype T | is_thread(T) )
     
    5958//-----------------------------------------------------------------------------
    6059// Ctors and dtors
    61 void ?{}(thread_desc& this);
    62 void ^?{}(thread_desc& this);
     60void ?{}(thread_desc & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
     61void ^?{}(thread_desc & this);
     62
     63static inline void ?{}(thread_desc & this)                                                                  { this{ "Anonymous Thread", *mainCluster, NULL, 0 }; }
     64static inline void ?{}(thread_desc & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
     65static inline void ?{}(thread_desc & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
     66static inline void ?{}(thread_desc & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, NULL, 0 }; }
      65static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, NULL, stackSize }; }
     68static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
     69static inline void ?{}(thread_desc & this, const char * const name)                                         { this{ name, *mainCluster, NULL, 0 }; }
     70static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl )                   { this{ name, cl, NULL, 0 }; }
     71static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
    6372
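
The header now funnels every convenience constructor through the single four-argument form, so the defaults live in exactly one place. Plain C has no overloading, so a sketch of the same funnelling uses named wrappers instead (all names hypothetical):

    #include <stddef.h>

    struct cluster;
    struct thread_desc;
    extern struct cluster * mainCluster;

    /* The one full initializer; every wrapper forwards to it. */
    void thread_init( struct thread_desc * this, const char * name,
                      struct cluster * cl, void * storage, size_t storageSize );

    static inline void thread_init_default( struct thread_desc * this )
            { thread_init( this, "Anonymous Thread", mainCluster, NULL, 0 ); }
    static inline void thread_init_stack( struct thread_desc * this, size_t stackSize )
            { thread_init( this, "Anonymous Thread", mainCluster, NULL, stackSize ); }
    static inline void thread_init_named( struct thread_desc * this, const char * name, struct cluster * cl )
            { thread_init( this, name, cl, NULL, 0 ); }
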
    6473//-----------------------------------------------------------------------------
  • src/libcfa/concurrency/thread.c

    rf6f0cca3 rff29f08  
    3030//-----------------------------------------------------------------------------
    3131// Thread ctors and dtors
    32 
    33 void ?{}(thread_desc& this) with( this ) {
    34         self_cor{};
    35         self_cor.name = "Anonymous Coroutine";
     32void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
     33        self_cor{ name, storage, storageSize };
     34        verify(&self_cor);
    3635        curr_cor = &self_cor;
    3736        self_mon.owner = &this;
    3837        self_mon.recursion = 1;
    3938        self_mon_p = &self_mon;
     39        curr_cluster = &cl;
    4040        next = NULL;
    41         __cfaabi_dbg_debug_do(
    42                 dbg_next = NULL;
    43                 dbg_prev = NULL;
    44                 __cfaabi_dbg_thread_register(&this);
    45         )
     41
     42        node.next = NULL;
     43        node.prev = NULL;
     44        doregister(this);
    4645
    4746        monitors{ &self_mon_p, 1, (fptr_t)0 };
     
    4948
    5049void ^?{}(thread_desc& this) with( this ) {
     50        unregister(this);
    5151        ^self_cor{};
    5252}
     
    8181        disable_interrupts();
    8282        create_stack(&thrd_c->stack, thrd_c->stack.size);
    83         TL_SET( this_coroutine, thrd_c );
     83        kernelTLS.this_coroutine = thrd_c;
    8484        CtxStart(&this, CtxInvokeThread);
    8585        assert( thrd_c->last->stack.context );
     
    9191
    9292extern "C" {
     93        // KERNEL ONLY
    9394        void __finish_creation(void) {
    94                 coroutine_desc* thrd_c = TL_GET( this_coroutine );
     95                coroutine_desc* thrd_c = kernelTLS.this_coroutine;
    9596                ThreadCtxSwitch( thrd_c, thrd_c->last );
    9697        }
     
    9899
    99100void yield( void ) {
    100         verify( TL_GET( preemption_state ).enabled );
      101        // Safety note: this could cause some false positives due to preemption
      102        verify( TL_GET( preemption_state.enabled ) );
    101103        BlockInternal( TL_GET( this_thread ) );
    102         verify( TL_GET( preemption_state ).enabled );
      104        // Safety note: this could cause some false positives due to preemption
      105        verify( TL_GET( preemption_state.enabled ) );
    103106}
    104107
     
    109112}
    110113
     114// KERNEL ONLY
    111115void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
    112116        // set state of current coroutine to inactive
     
    116120        // set new coroutine that the processor is executing
    117121        // and context switch to it
    118         TL_SET( this_coroutine, dst );
     122        kernelTLS.this_coroutine = dst;
    119123        assert( src->stack.context );
    120124        CtxSwitch( src->stack.context, dst->stack.context );
    121         TL_SET( this_coroutine, src );
     125        kernelTLS.this_coroutine = src;
    122126
    123127        // set state of new coroutine to active
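
ThreadCtxSwitch publishes the destination coroutine in TLS before switching and restores the source afterwards, so this_coroutine is correct on whichever stack is currently running. A plain-C sketch of that bracketing (the real CtxSwitch is assembly, so it is left as a comment; names hypothetical):

    struct coroutine_desc { void * stack_context; };

    static __thread struct coroutine_desc * this_coroutine;

    static void ctx_switch_sketch( struct coroutine_desc * src, struct coroutine_desc * dst ) {
            this_coroutine = dst;   /* visible to dst as soon as it runs */
            /* CtxSwitch( src->stack_context, dst->stack_context ); */
            this_coroutine = src;   /* executes only once src is resumed here */
    }
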