Timestamp: Jul 12, 2021, 1:44:35 PM
Author: caparsons <caparson@…>
Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 605673f, 9345684
Parents: cf444b6 (diff), a953c2e3 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message: Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location: libcfa/src/concurrency
Files: 27 edited
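
The dominant change merged in from master is a mechanical rename of the runtime's private descriptor types, moving the `$` marker from prefix to suffix: `$thread` becomes `thread$`, `$coroutine` becomes `coroutine$`, and `$monitor` becomes `monitor$`. A minimal before/after sketch of the pattern, built from declarations that appear in the kernel/fwd.hfa hunks below:

    // Before the merge: private runtime types carry a leading '$'.
    struct $thread;
    extern void unpark( struct $thread * this );

    // After the merge: the '$' moves to the end of the name.
    struct thread$;
    extern void unpark( struct thread$ * this );

Aside from this rename, the only other changes visible below are a corrected file-name comment in monitor.cfa (`$monitor.c` to `monitor.cfa`) and some trailing-whitespace cleanup in locks.hfa.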

Legend:

  Lines prefixed with '-' were removed, lines prefixed with '+' were added, and all other lines are unmodified context. A '…' line marks elided, unchanged code between hunks.
  • libcfa/src/concurrency/alarm.cfa (rcf444b6 → r6ff08d8)

     //=============================================================================================

    -void ?{}( alarm_node_t & this, $thread * thrd, Duration alarm, Duration period) with( this ) {
    +void ?{}( alarm_node_t & this, thread$ * thrd, Duration alarm, Duration period) with( this ) {
             this.initial = alarm;
             this.period  = period;
  • libcfa/src/concurrency/alarm.hfa (rcf444b6 → r6ff08d8)

     #include "containers/list.hfa"

    -struct $thread;
    +struct thread$;
     struct processor;

    …

             union {
    -                $thread * thrd;                 // thrd who created event
    +                thread$ * thrd;                 // thrd who created event
                     processor * proc;                       // proc who created event
                     Alarm_Callback callback;        // callback to handle event
    …
     P9_EMBEDDED( alarm_node_t, dlink(alarm_node_t) )

    -void ?{}( alarm_node_t & this, $thread * thrd, Duration alarm, Duration period );
    +void ?{}( alarm_node_t & this, thread$ * thrd, Duration alarm, Duration period );
     void ?{}( alarm_node_t & this, processor * proc, Duration alarm, Duration period );
     void ?{}( alarm_node_t & this, Alarm_Callback callback, Duration alarm, Duration period );
  • libcfa/src/concurrency/clib/cfathread.cfa (rcf444b6 → r6ff08d8)

     #include "cfathread.h"

    -extern void ?{}(processor &, const char[], cluster &, $thread *);
    +extern void ?{}(processor &, const char[], cluster &, thread$ *);
     extern "C" {
           extern void __cfactx_invoke_thread(void (*main)(void *), void * this);
    …

     struct cfathread_object {
    -        $thread self;
    +        thread$ self;
             void * (*themain)( void * );
             void * arg;
    …
     void ^?{}(cfathread_object & mutex this);

    -static inline $thread * get_thread( cfathread_object & this ) { return &this.self; }
    +static inline thread$ * get_thread( cfathread_object & this ) { return &this.self; }

     typedef ThreadCancelled(cfathread_object) cfathread_exception;
    …
     // Special Init Thread responsible for the initialization or processors
     struct __cfainit {
    -        $thread self;
    +        thread$ self;
             void (*init)( void * );
             void * arg;
    …
     void ^?{}(__cfainit & mutex this);

    -static inline $thread * get_thread( __cfainit & this ) { return &this.self; }
    +static inline thread$ * get_thread( __cfainit & this ) { return &this.self; }

     typedef ThreadCancelled(__cfainit) __cfainit_exception;
    …

             // Don't use __thrd_start! just prep the context manually
    -        $thread * this_thrd = get_thread(this);
    +        thread$ * this_thrd = get_thread(this);
             void (*main_p)(__cfainit &) = main;

  • libcfa/src/concurrency/coroutine.cfa (rcf444b6 → r6ff08d8)

    
     extern "C" {
    -        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
    +        void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__));
             static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
             static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
    …
     forall(T & | is_coroutine(T))
     void __cfaehm_cancelled_coroutine(
    -                T & cor, $coroutine * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
    +                T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
             verify( desc->cancellation );
             desc->state = Cancelled;
    …
     }

    -void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ) with( this ) {
    +void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
             (this.context){0p, 0p};
             (this.stack){storage, storageSize};
    …
     }

    -void ^?{}($coroutine& this) {
    +void ^?{}(coroutine$& this) {
             if(this.state != Halted && this.state != Start && this.state != Primed) {
    -                $coroutine * src = active_coroutine();
    -                $coroutine * dst = &this;
    +                coroutine$ * src = active_coroutine();
    +                coroutine$ * dst = &this;

                     struct _Unwind_Exception storage;
    …
     forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
     void prime(T& cor) {
    -        $coroutine* this = get_coroutine(cor);
    +        coroutine$* this = get_coroutine(cor);
             assert(this->state == Start);

    …
     // is not inline (We can't inline Cforall in C)
     extern "C" {
    -        void __cfactx_cor_leave( struct $coroutine * src ) {
    -                $coroutine * starter = src->cancellation != 0 ? src->last : src->starter;
    +        void __cfactx_cor_leave( struct coroutine$ * src ) {
    +                coroutine$ * starter = src->cancellation != 0 ? src->last : src->starter;

                     src->state = Halted;
    …
             }

    -        struct $coroutine * __cfactx_cor_finish(void) {
    -                struct $coroutine * cor = active_coroutine();
    +        struct coroutine$ * __cfactx_cor_finish(void) {
    +                struct coroutine$ * cor = active_coroutine();

                     // get the active thread once
    -                $thread * athrd = active_thread();
    +                thread$ * athrd = active_thread();

                     /* paranoid */ verify( athrd->corctx_flag );
  • libcfa/src/concurrency/coroutine.hfa (rcf444b6 → r6ff08d8)

     trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled, (T))) {
             void main(T & this);
    -        $coroutine * get_coroutine(T & this);
    +        coroutine$ * get_coroutine(T & this);
     };

    -#define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this)
    +#define DECL_COROUTINE(X) static inline coroutine$* get_coroutine(X& this) { return &this.__cor; } void main(X& this)

     //-----------------------------------------------------------------------------
    …
     // void ^?{}( coStack_t & this );

    -void  ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize );
    -void ^?{}( $coroutine & this );
    -
    -static inline void ?{}( $coroutine & this)                                       { this{ "Anonymous Coroutine", 0p, 0 }; }
    -static inline void ?{}( $coroutine & this, size_t stackSize)                     { this{ "Anonymous Coroutine", 0p, stackSize }; }
    -static inline void ?{}( $coroutine & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
    -static inline void ?{}( $coroutine & this, const char name[])                    { this{ name, 0p, 0 }; }
    -static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }
    +void  ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize );
    +void ^?{}( coroutine$ & this );
    +
    +static inline void ?{}( coroutine$ & this)                                       { this{ "Anonymous Coroutine", 0p, 0 }; }
    +static inline void ?{}( coroutine$ & this, size_t stackSize)                     { this{ "Anonymous Coroutine", 0p, stackSize }; }
    +static inline void ?{}( coroutine$ & this, void * storage, size_t storageSize )  { this{ "Anonymous Coroutine", storage, storageSize }; }
    +static inline void ?{}( coroutine$ & this, const char name[])                    { this{ name, 0p, 0 }; }
    +static inline void ?{}( coroutine$ & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }

     //-----------------------------------------------------------------------------
    …
     void prime(T & cor);

    -static inline struct $coroutine * active_coroutine() { return active_thread()->curr_cor; }
    +static inline struct coroutine$ * active_coroutine() { return active_thread()->curr_cor; }

     //-----------------------------------------------------------------------------
    …

             forall(T &)
    -        void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *));
    -
    -        extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
    +        void __cfactx_start(void (*main)(T &), struct coroutine$ * cor, T & this, void (*invoke)(void (*main)(void *), void *));
    +
    +        extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__));

             extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
    …
     // Private wrappers for context switch and stack creation
     // Wrapper for co
    -static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) {
    +static inline void $ctx_switch( coroutine$ * src, coroutine$ * dst ) __attribute__((nonnull (1, 2))) {
             // set state of current coroutine to inactive
             src->state = src->state == Halted ? Halted : Blocked;

             // get the active thread once
    -        $thread * athrd = active_thread();
    +        thread$ * athrd = active_thread();

             // Mark the coroutine
    …
                     // will also migrate which means this value will
                     // stay in syn with the TLS
    -                $coroutine * src = active_coroutine();
    +                coroutine$ * src = active_coroutine();

                     assertf( src->last != 0,
    …
     forall(T & | is_coroutine(T))
     void __cfaehm_cancelled_coroutine(
    -        T & cor, $coroutine * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );
    +        T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );

     // Resume implementation inlined for performance
    …
             // will also migrate which means this value will
             // stay in syn with the TLS
    -        $coroutine * src = active_coroutine();
    -        $coroutine * dst = get_coroutine(cor);
    +        coroutine$ * src = active_coroutine();
    +        coroutine$ * dst = get_coroutine(cor);

             if( unlikely(dst->context.SP == 0p) ) {
    …
     }

    -static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) {
    +static inline void resume( coroutine$ * dst ) __attribute__((nonnull (1))) {
             // optimization : read TLS once and reuse it
             // Safety note: this is preemption safe since if
    …
             // will also migrate which means this value will
             // stay in syn with the TLS
    -        $coroutine * src = active_coroutine();
    +        coroutine$ * src = active_coroutine();

             // not resuming self ?
  • libcfa/src/concurrency/exception.cfa (rcf444b6 → r6ff08d8)

     #include "coroutine.hfa"

    -extern struct $thread * mainThread;
    +extern struct thread$ * mainThread;
     extern "C" {
     extern void __cfactx_thrd_leave();
    …

     STOP_AT_END_FUNCTION(coroutine_cancelstop,
    -        struct $coroutine * src = ($coroutine *)stop_param;
    -        struct $coroutine * dst = src->last;
    +        struct coroutine$ * src = (coroutine$ *)stop_param;
    +        struct coroutine$ * dst = src->last;

             $ctx_switch( src, dst );
    …
             void * stop_param;

    -        struct $thread * this_thread = active_thread();
    +        struct thread$ * this_thread = active_thread();
             if ( &this_thread->self_cor != this_thread->curr_cor ) {
    -                struct $coroutine * cor = this_thread->curr_cor;
    +                struct coroutine$ * cor = this_thread->curr_cor;
                     cor->cancellation = unwind_exception;

  • libcfa/src/concurrency/future.hfa (rcf444b6 → r6ff08d8)

    
                     // Fulfil the future, returns whether or not someone was unblocked
    -                $thread * fulfil( future(T) & this, T result ) {
    +                thread$ * fulfil( future(T) & this, T result ) {
                             this.result = result;
                             return fulfil( (future_t&)this );
  • libcfa/src/concurrency/invoke.c (rcf444b6 → r6ff08d8)

     // Called from the kernel when starting a coroutine or task so must switch back to user mode.

    -extern struct $coroutine * __cfactx_cor_finish(void);
    -extern void __cfactx_cor_leave ( struct $coroutine * );
    +extern struct coroutine$ * __cfactx_cor_finish(void);
    +extern void __cfactx_cor_leave ( struct coroutine$ * );
     extern void __cfactx_thrd_leave();

    …
     ) {
             // Finish setting up the coroutine by setting its state
    -        struct $coroutine * cor = __cfactx_cor_finish();
    +        struct coroutine$ * cor = __cfactx_cor_finish();

             // Call the main of the coroutine
    …
     }

    -void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__));
    -void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) {
    +void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
    +void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
             _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
             printf("UNWIND ERROR %d after force unwind\n", ret);
    …
     void __cfactx_start(
             void (*main)(void *),
    -        struct $coroutine * cor,
    +        struct coroutine$ * cor,
             void *this,
             void (*invoke)(void *)
  • libcfa/src/concurrency/invoke.h (rcf444b6 → r6ff08d8)

             enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting };

    -        struct $coroutine {
    +        struct coroutine$ {
                     // context that is switch during a __cfactx_switch
                     struct __stack_context_t context;
    …

                     // first coroutine to resume this one
    -                struct $coroutine * starter;
    +                struct coroutine$ * starter;

                     // last coroutine to resume this one
    -                struct $coroutine * last;
    +                struct coroutine$ * last;

                     // If non-null stack must be unwound with this exception
    …
             };
             // Wrapper for gdb
    -        struct cfathread_coroutine_t { struct $coroutine debug; };
    -
    -        static inline struct __stack_t * __get_stack( struct $coroutine * cor ) {
    +        struct cfathread_coroutine_t { struct coroutine$ debug; };
    +
    +        static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) {
                     return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
             }
    …
             };

    -        struct $monitor {
    +        struct monitor$ {
                     // spinlock to protect internal data
                     struct __spinlock_t lock;

                     // current owner of the monitor
    -                struct $thread * owner;
    +                struct thread$ * owner;

                     // queue of threads that are blocked waiting for the monitor
    -                __queue_t(struct $thread) entry_queue;
    +                __queue_t(struct thread$) entry_queue;

                     // stack of conditions to run next once we exit the monitor
    …
             };
             // Wrapper for gdb
    -        struct cfathread_monitor_t { struct $monitor debug; };
    +        struct cfathread_monitor_t { struct monitor$ debug; };

             struct __monitor_group_t {
                     // currently held monitors
    -                __cfa_anonymous_object( __small_array_t($monitor*) );
    +                __cfa_anonymous_object( __small_array_t(monitor$*) );

                     // last function that acquired monitors
    …
             // instrusive link field for threads
             struct __thread_desc_link {
    -                struct $thread * next;
    +                struct thread$ * next;
                     volatile unsigned long long ts;
             };

    -        struct $thread {
    +        struct thread$ {
                     // Core threading fields
                     // context that is switch during a __cfactx_switch
    …

                     // coroutine body used to store context
    -                struct $coroutine  self_cor;
    +                struct coroutine$  self_cor;

                     // current active context
    -                struct $coroutine * curr_cor;
    +                struct coroutine$ * curr_cor;

                     // monitor body used for mutual exclusion
    -                struct $monitor    self_mon;
    +                struct monitor$    self_mon;

                     // pointer to monitor with sufficient lifetime for current monitors
    -                struct $monitor *  self_mon_p;
    +                struct monitor$ *  self_mon_p;

                     // monitors currently held by this thread
    …
                     // used to put threads on user data structures
                     struct {
    -                        struct $thread * next;
    -                        struct $thread * back;
    +                        struct thread$ * next;
    +                        struct thread$ * back;
                     } seqable;

                     // used to put threads on dlist data structure
    -                __cfa_dlink($thread);
    +                __cfa_dlink(thread$);

                     struct {
    -                        struct $thread * next;
    -                        struct $thread * prev;
    +                        struct thread$ * next;
    +                        struct thread$ * prev;
                     } node;

    …
             };
             #ifdef __cforall
    -                P9_EMBEDDED( $thread, dlink($thread) )
    +                P9_EMBEDDED( thread$, dlink(thread$) )
             #endif
             // Wrapper for gdb
    -        struct cfathread_thread_t { struct $thread debug; };
    +        struct cfathread_thread_t { struct thread$ debug; };

             #ifdef __CFA_DEBUG__
    -                void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]);
    +                void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]);
             #else
                     #define __cfaabi_dbg_record_thrd(x, y, z)
    …
             extern "Cforall" {

    -                static inline $thread *& get_next( $thread & this ) __attribute__((const)) {
    +                static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
                             return this.link.next;
                     }

    -                static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) {
    +                static inline [thread$ *&, thread$ *& ] __get( thread$ & this ) __attribute__((const)) {
                             return this.node.[next, prev];
                     }

    -                static inline $thread * volatile & ?`next ( $thread * this )  __attribute__((const)) {
    +                static inline thread$ * volatile & ?`next ( thread$ * this )  __attribute__((const)) {
                             return this->seqable.next;
                     }

    -                static inline $thread *& Back( $thread * this ) __attribute__((const)) {
    +                static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
                             return this->seqable.back;
                     }

    -                static inline $thread *& Next( $thread * this ) __attribute__((const)) {
    +                static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
                                     return this->seqable.next;
                     }

    -                static inline bool listed( $thread * this ) {
    +                static inline bool listed( thread$ * this ) {
                             return this->seqable.next != 0p;
                     }
    …
                     }

    -                static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) {
    +                static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) {
                             (this.data){data};
                             (this.size){size};
  • libcfa/src/concurrency/io.cfa (rcf444b6 → r6ff08d8)

             static inline unsigned __flush( struct $io_context & );
             static inline __u32 __release_sqes( struct $io_context & );
    -        extern void __kernel_unpark( $thread * thrd );
    +        extern void __kernel_unpark( thread$ * thrd );

             bool __cfa_io_drain( processor * proc ) {
  • libcfa/src/concurrency/io/types.hfa (rcf444b6 → r6ff08d8)

    
     static inline {
    -        $thread * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
    +        thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
                     this.result = result;
                     return fulfil(this.self, do_unpark);
  • libcfa/src/concurrency/kernel.cfa (rcf444b6 → r6ff08d8)

     #endif

    -extern $thread * mainThread;
    +extern thread$ * mainThread;
     extern processor * mainProcessor;

     //-----------------------------------------------------------------------------
     // Kernel Scheduling logic
    -static $thread * __next_thread(cluster * this);
    -static $thread * __next_thread_slow(cluster * this);
    -static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
    -static void __run_thread(processor * this, $thread * dst);
    +static thread$ * __next_thread(cluster * this);
    +static thread$ * __next_thread_slow(cluster * this);
    +static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
    +static void __run_thread(processor * this, thread$ * dst);
     static void __wake_one(cluster * cltr);

    …
                     __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);

    -                $thread * readyThread = 0p;
    +                thread$ * readyThread = 0p;
                     MAIN_LOOP:
                     for() {
    …
     // runThread runs a thread by context switching
     // from the processor coroutine to the target thread
    -static void __run_thread(processor * this, $thread * thrd_dst) {
    +static void __run_thread(processor * this, thread$ * thrd_dst) {
             /* paranoid */ verify( ! __preemption_enabled() );
             /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
    …
             __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);

    -        $coroutine * proc_cor = get_coroutine(this->runner);
    +        coroutine$ * proc_cor = get_coroutine(this->runner);

             // set state of processor coroutine to inactive
    …
                     /* paranoid */ verify( thrd_dst->context.SP );
                     /* paranoid */ verify( thrd_dst->state != Halted );
    -                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
    -                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
    +                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
    +                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
                     /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );

    …

                     /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
    -                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
    -                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
    +                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
    +                /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
                     /* paranoid */ verify( thrd_dst->context.SP );
                     /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
    …
     void returnToKernel() {
             /* paranoid */ verify( ! __preemption_enabled() );
    -        $coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
    -        $thread * thrd_src = kernelTLS().this_thread;
    +        coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
    +        thread$ * thrd_src = kernelTLS().this_thread;

             __STATS( thrd_src->last_proc = kernelTLS().this_processor; )
    …

             /* paranoid */ verify( ! __preemption_enabled() );
    -        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    -        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
    +        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
    +        /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
     }

    …
     // Scheduler routines
     // KERNEL ONLY
    -static void __schedule_thread( $thread * thrd ) {
    +static void __schedule_thread( thread$ * thrd ) {
             /* paranoid */ verify( ! __preemption_enabled() );
             /* paranoid */ verify( ready_schedule_islocked());
    …
     }

    -void schedule_thread$( $thread * thrd ) {
    +void schedule_thread$( thread$ * thrd ) {
             ready_schedule_lock();
                     __schedule_thread( thrd );
    …

     // KERNEL ONLY
    -static inline $thread * __next_thread(cluster * this) with( *this ) {
    +static inline thread$ * __next_thread(cluster * this) with( *this ) {
             /* paranoid */ verify( ! __preemption_enabled() );

             ready_schedule_lock();
    -                $thread * thrd = pop_fast( this );
    +                thread$ * thrd = pop_fast( this );
             ready_schedule_unlock();

    …

     // KERNEL ONLY
    -static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
    +static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
             /* paranoid */ verify( ! __preemption_enabled() );

             ready_schedule_lock();
    -                $thread * thrd;
    +                thread$ * thrd;
                     for(25) {
                             thrd = pop_slow( this );
    …
     }

    -static inline bool __must_unpark( $thread * thrd ) {
    +static inline bool __must_unpark( thread$ * thrd ) {
             int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
             switch(old_ticket) {
    …
     }

    -void __kernel_unpark( $thread * thrd ) {
    +void __kernel_unpark( thread$ * thrd ) {
             /* paranoid */ verify( ! __preemption_enabled() );
             /* paranoid */ verify( ready_schedule_islocked());
    …
     }

    -void unpark( $thread * thrd ) {
    +void unpark( thread$ * thrd ) {
             if( !thrd ) return;

    …
             // Should never return
             void __cfactx_thrd_leave() {
    -                $thread * thrd = active_thread();
    -                $monitor * this = &thrd->self_mon;
    +                thread$ * thrd = active_thread();
    +                monitor$ * this = &thrd->self_mon;

                     // Lock the monitor now
    …
                     /* paranoid */ verify( kernelTLS().this_thread == thrd );
                     /* paranoid */ verify( thrd->context.SP );
    -                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
    -                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
    +                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
    +                /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );

                     thrd->state = Halting;
    …
     bool force_yield( __Preemption_Reason reason ) {
             __disable_interrupts_checked();
    -                $thread * thrd = kernelTLS().this_thread;
    +                thread$ * thrd = kernelTLS().this_thread;
                     /* paranoid */ verify(thrd->state == Active);

    …
     //=============================================================================================
     void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
    -        $thread * thrd = __cfaabi_tls.this_thread;
    +        thread$ * thrd = __cfaabi_tls.this_thread;

             if(thrd) {
  • libcfa/src/concurrency/kernel.hfa (rcf444b6 → r6ff08d8)

             // it is not a particularly safe scheme as it can make processors less homogeneous
             struct {
    -                $thread * thrd;
    +                thread$ * thrd;
             } init;

    …
             // List of threads
             __spinlock_t thread_list_lock;
    -        __dllist_t(struct $thread) threads;
    +        __dllist_t(struct thread$) threads;
             unsigned int nthreads;

  • libcfa/src/concurrency/kernel/fwd.hfa (rcf444b6 → r6ff08d8)

     #endif

    -struct $thread;
    +struct thread$;
     struct processor;
     struct cluster;
    …
             extern "Cforall" {
                     extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
    -                        struct $thread          * volatile this_thread;
    +                        struct thread$          * volatile this_thread;
                             struct processor        * volatile this_processor;
                             volatile bool sched_lock;
    …
             extern "Cforall" {
                     extern void park( void );
    -                extern void unpark( struct $thread * this );
    -                static inline struct $thread * active_thread () {
    -                        struct $thread * t = publicTLS_get( this_thread );
    +                extern void unpark( struct thread$ * this );
    +                static inline struct thread$ * active_thread () {
    +                        struct thread$ * t = publicTLS_get( this_thread );
                             /* paranoid */ verify( t );
                             return t;
    …
                     // Semaphore which only supports a single thread
                     struct single_sem {
    -                        struct $thread * volatile ptr;
    +                        struct thread$ * volatile ptr;
                     };

    …
                             bool wait(single_sem & this) {
                                     for() {
    -                                        struct $thread * expected = this.ptr;
    +                                        struct thread$ * expected = this.ptr;
                                             if(expected == 1p) {
                                                     if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    …
                             bool post(single_sem & this) {
                                     for() {
    -                                        struct $thread * expected = this.ptr;
    +                                        struct thread$ * expected = this.ptr;
                                             if(expected == 1p) return false;
                                             if(expected == 0p) {
    …
                             //     1p     : fulfilled (wait won't block)
                             // any thread : a thread is currently waiting
    -                        struct $thread * volatile ptr;
    +                        struct thread$ * volatile ptr;
                     };

    …
                             bool wait(oneshot & this) {
                                     for() {
    -                                        struct $thread * expected = this.ptr;
    +                                        struct thread$ * expected = this.ptr;
                                             if(expected == 1p) return false;
                                             /* paranoid */ verify( expected == 0p );
    …
                             // Mark as fulfilled, wake thread if needed
                             // return true if a thread was unparked
    -                        $thread * post(oneshot & this, bool do_unpark = true) {
    -                                struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
    +                        thread$ * post(oneshot & this, bool do_unpark = true) {
    +                                struct thread$ * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
                                     if( got == 0p ) return 0p;
                                     if(do_unpark) unpark( got );
    …
                             // from the server side, mark the future as fulfilled
                             // delete it if needed
    -                        $thread * fulfil( future_t & this, bool do_unpark = true  ) {
    +                        thread$ * fulfil( future_t & this, bool do_unpark = true  ) {
                                     for() {
                                             struct oneshot * expected = this.ptr;
    …
                                             if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                                                     if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return 0p; }
    -                                                $thread * ret = post( *expected, do_unpark );
    +                                                thread$ * ret = post( *expected, do_unpark );
                                                     __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
                                                     return ret;
  • libcfa/src/concurrency/kernel/startup.cfa (rcf444b6 → r6ff08d8)

     static void __kernel_first_resume( processor * this );
     static void __kernel_last_resume ( processor * this );
    -static void init(processor & this, const char name[], cluster & _cltr, $thread * initT);
    +static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT);
     static void deinit(processor & this);
     static void doregister( struct cluster & cltr );
    …
     static void register_tls( processor * this );
     static void unregister_tls( processor * this );
    -static void ?{}( $coroutine & this, current_stack_info_t * info);
    -static void ?{}( $thread & this, current_stack_info_t * info);
    +static void ?{}( coroutine$ & this, current_stack_info_t * info);
    +static void ?{}( thread$ & this, current_stack_info_t * info);
     static void ?{}(processorCtx_t & this) {}
     static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info);
    …
     KERNEL_STORAGE(cluster,              mainCluster);
     KERNEL_STORAGE(processor,            mainProcessor);
    -KERNEL_STORAGE($thread,              mainThread);
    +KERNEL_STORAGE(thread$,              mainThread);
     KERNEL_STORAGE(__stack_t,            mainThreadCtx);
     KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
    …
     cluster              * mainCluster;
     processor            * mainProcessor;
    -$thread              * mainThread;
    +thread$              * mainThread;
     __scheduler_RWLock_t * __scheduler_lock;

    …
             // SKULLDUGGERY: the mainThread steals the process main thread
             // which will then be scheduled by the mainProcessor normally
    -        mainThread = ($thread *)&storage_mainThread;
    +        mainThread = (thread$ *)&storage_mainThread;
             current_stack_info_t info;
             info.storage = (__stack_t*)&storage_mainThreadCtx;
    …

     static void __kernel_first_resume( processor * this ) {
    -        $thread * src = mainThread;
    -        $coroutine * dst = get_coroutine(this->runner);
    +        thread$ * src = mainThread;
    +        coroutine$ * dst = get_coroutine(this->runner);

             /* paranoid */ verify( ! __preemption_enabled() );
    …
     // KERNEL_ONLY
     static void __kernel_last_resume( processor * this ) {
    -        $coroutine * src = &mainThread->self_cor;
    -        $coroutine * dst = get_coroutine(this->runner);
    +        coroutine$ * src = &mainThread->self_cor;
    +        coroutine$ * dst = get_coroutine(this->runner);

             /* paranoid */ verify( ! __preemption_enabled() );
    …
     //-----------------------------------------------------------------------------
     // Main thread construction
    -static void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
    +static void ?{}( coroutine$ & this, current_stack_info_t * info) with( this ) {
             stack.storage = info->storage;
             with(*stack.storage) {
    …
     }

    -static void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
    +static void ?{}( thread$ & this, current_stack_info_t * info) with( this ) {
             ticket = TICKET_RUNNING;
             state = Start;
    …
     }

    -static void init(processor & this, const char name[], cluster & _cltr, $thread * initT) with( this ) {
    +static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT) with( this ) {
             this.name = name;
             this.cltr = &_cltr;
    …
     }

    -void ?{}(processor & this, const char name[], cluster & _cltr, $thread * initT) {
    +void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
             ( this.terminated ){};
             ( this.runner ){};
    …
     }

    -void doregister( cluster * cltr, $thread & thrd ) {
    +void doregister( cluster * cltr, thread$ & thrd ) {
             lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
             cltr->nthreads += 1;
    …
     }

    -void unregister( cluster * cltr, $thread & thrd ) {
    +void unregister( cluster * cltr, thread$ & thrd ) {
             lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
             remove(cltr->threads, thrd );
  • libcfa/src/concurrency/kernel_private.hfa (rcf444b6 → r6ff08d8)

     }

    -void schedule_thread$( $thread * ) __attribute__((nonnull (1)));
    +void schedule_thread$( thread$ * ) __attribute__((nonnull (1)));

     extern bool __preemption_enabled();

     //release/wake-up the following resources
    -void __thread_finish( $thread * thrd );
    +void __thread_finish( thread$ * thrd );

     //-----------------------------------------------------------------------------
    …

     __cfaabi_dbg_debug_do(
    -        extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
    -        extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
    +        extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
    +        extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
     )

    …
     //-----------------------------------------------------------------------------
     // Utils
    -void doregister( struct cluster * cltr, struct $thread & thrd );
    -void unregister( struct cluster * cltr, struct $thread & thrd );
    +void doregister( struct cluster * cltr, struct thread$ & thrd );
    +void unregister( struct cluster * cltr, struct thread$ & thrd );

     //-----------------------------------------------------------------------------
    …
     // push thread onto a ready queue for a cluster
     // returns true if the list was previously empty, false otherwise
    -__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool local);
    +__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool local);

     //-----------------------------------------------------------------------
    …
     // returns 0p if empty
     // May return 0p spuriously
    -__attribute__((hot)) struct $thread * pop_fast(struct cluster * cltr);
    +__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

     //-----------------------------------------------------------------------
    …
     // returns 0p if empty
     // May return 0p spuriously
    -__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr);
    +__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

     //-----------------------------------------------------------------------
    …
     // returns 0p if empty
     // guaranteed to find any threads added before this call
    -__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr);
    +__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);

     //-----------------------------------------------------------------------
  • libcfa/src/concurrency/locks.cfa (rcf444b6 → r6ff08d8)

    
                     // waiting thread
    -                struct $thread * t;
    +                struct thread$ * t;

                     // shadow field
    …
             P9_EMBEDDED( info_thread(L), dlink(info_thread(L)) )

    -        void ?{}( info_thread(L) & this, $thread * t, uintptr_t info, L * l ) {
    +        void ?{}( info_thread(L) & this, thread$ * t, uintptr_t info, L * l ) {
                     this.t = t;
                     this.info = info;
    …
     void lock( blocking_lock & this ) with( this ) {
             lock( lock __cfaabi_dbg_ctx2 );
    -        $thread * thrd = active_thread();
    +        thread$ * thrd = active_thread();

             // single acquisition lock is held by current thread
    …

     void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
    -        $thread * t = &try_pop_front( blocked_threads );
    +        thread$ * t = &try_pop_front( blocked_threads );
             owner = t;
             recursion_count = ( t ? 1 : 0 );
    …
     }

    -void on_notify( blocking_lock & this, $thread * t ) with( this ) {
    +void on_notify( blocking_lock & this, thread$ * t ) with( this ) {
             lock( lock __cfaabi_dbg_ctx2 );
             // lock held
    …
     }

    -$thread * V (semaphore & this, const bool doUnpark ) with( this ) {
    -        $thread * thrd = 0p;
    +thread$ * V (semaphore & this, const bool doUnpark ) with( this ) {
    +        thread$ * thrd = 0p;
             lock( lock __cfaabi_dbg_ctx2 );
             count += 1;
    …

     bool V(semaphore & this) with( this ) {
    -        $thread * thrd = V(this, true);
    +        thread$ * thrd = V(this, true);
             return thrd != 0p;
     }

     bool V(semaphore & this, unsigned diff) with( this ) {
    -        $thread * thrd = 0p;
    +        thread$ * thrd = 0p;
             lock( lock __cfaabi_dbg_ctx2 );
             int release = max(-count, (int)diff);
  • libcfa/src/concurrency/locks.hfa

    rcf444b6 r6ff08d8  
    3939struct Semaphore0nary {
    4040        __spinlock_t lock; // needed to protect
    41         mpsc_queue($thread) queue;
    42 };
    43 
    44 static inline bool P(Semaphore0nary & this, $thread * thrd) {
     41        mpsc_queue(thread$) queue;
     42};
     43
     44static inline bool P(Semaphore0nary & this, thread$ * thrd) {
    4545        /* paranoid */ verify(!thrd`next);
    4646        /* paranoid */ verify(!(&(*thrd)`next));
     
    5151
    5252static inline bool P(Semaphore0nary & this) {
    53     $thread * thrd = active_thread();
     53    thread$ * thrd = active_thread();
    5454    P(this, thrd);
    5555    park();
     
    5757}
    5858
    59 static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
    60         $thread * next;
     59static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
     60        thread$ * next;
    6161        lock(this.lock __cfaabi_dbg_ctx2);
    6262                for (;;) {
     
    124124static inline bool P(ThreadBenaphore & this, bool wait)   { return wait ? P(this) : tryP(this); }
    125125
    126 static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
     126static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
    127127        if (V(this.ben)) return 0p;
    128128        return V(this.sem, doUnpark);
     
    134134        __spinlock_t lock;
    135135        int count;
    136         __queue_t($thread) waiting;
     136        __queue_t(thread$) waiting;
    137137};
    138138
     
    142142bool   V (semaphore & this);
    143143bool   V (semaphore & this, unsigned count);
    144 $thread * V (semaphore & this, bool );
     144thread$ * V (semaphore & this, bool );
    145145
    146146//----------
     
    156156static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
    157157static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    158 static inline void   on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
     158static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
    159159
    160160//----------
     
    170170static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
    171171static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
    172 static inline void   on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
     172static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
    173173
    174174struct fast_lock {
    175         $thread * volatile owner;
     175        thread$ * volatile owner;
    176176        ThreadBenaphore sem;
    177177};
     
    179179static inline void ?{}(fast_lock & this) { this.owner = 0p; }
    180180
    181 static inline bool $try_lock(fast_lock & this, $thread * thrd) {
    182     $thread * exp = 0p;
     181static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
     182    thread$ * exp = 0p;
    183183    return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
    184184}
     
    186186static inline void lock( fast_lock & this ) __attribute__((artificial));
    187187static inline void lock( fast_lock & this ) {
    188         $thread * thrd = active_thread();
     188        thread$ * thrd = active_thread();
    189189        /* paranoid */verify(thrd != this.owner);
    190190
     
    197197static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
    198198static inline bool try_lock ( fast_lock & this ) {
    199         $thread * thrd = active_thread();
     199        thread$ * thrd = active_thread();
    200200        /* paranoid */ verify(thrd != this.owner);
    201201        return $try_lock(this, thrd);
    202202}
    203203
    204 static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
    205 static inline $thread * unlock( fast_lock & this ) {
     204static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));
     205static inline thread$ * unlock( fast_lock & this ) {
    206206        /* paranoid */ verify(active_thread() == this.owner);
    207207
     
    216216static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
    217217static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
    218 static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
     218static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }
    219219
    220220struct mcs_node {
     
    248248
    249249        // Current thread owning the lock
    250         struct $thread * owner;
     250        struct thread$ * owner;
    251251
    252252        // List of blocked threads
    253         dlist( $thread ) blocked_threads;
     253        dlist( thread$ ) blocked_threads;
    254254
    255255        // Used for comparing and exchanging
     
    343343        // block until signalled
    344344        while (block(this)) if(try_lock_contention(this)) return true;
    345        
     345
    346346        // this should never be reached as block(this) always returns true
    347347        return false;
     
    385385        // block until signalled
    386386        while (block(this)) if(try_lock_contention(this)) return true;
    387        
     387
    388388        // this should never be reached as block(this) always returns true
    389389        return false;
     
    395395    if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
    396396        lock( spinlock __cfaabi_dbg_ctx2 );
    397         $thread * t = &try_pop_front( blocked_threads );
     397        thread$ * t = &try_pop_front( blocked_threads );
    398398        unlock( spinlock );
    399399        unpark( t );
    400400}
    401401
    402 static inline void on_notify(linear_backoff_then_block_lock & this, struct $thread * t ) { unpark(t); }
     402static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
    403403static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
    404404static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); }
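
Note the acquisition side shown above: a contended acquirer parks inside block() and, once unpark()ed by unlock(), must still win try_lock_contention(), looping otherwise, since hand-off is not direct. The same loop in isolation, as a hypothetical fragment built from the routines in this hunk:

        static inline bool acquire_slow( linear_backoff_then_block_lock & this ) {
                // park until a releaser unpark()s us, then compete for the lock;
                // the attempt can fail, in which case we block again
                while ( block( this ) )
                        if ( try_lock_contention( this ) ) return true;
                // unreachable: block() always returns true, per the comment above
                return false;
        }
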
     
    408408trait is_blocking_lock(L & | sized(L)) {
    409409        // For synchronization locks to use when acquiring
    410         void on_notify( L &, struct $thread * );
     410        void on_notify( L &, struct thread$ * );
    411411
    412412        // For synchronization locks to use when releasing
     
    442442                int count;
    443443        };
    444        
     444
    445445
    446446        void  ?{}( condition_variable(L) & this );
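
The is_blocking_lock trait above is what lets condition_variable(L) treat all of these locks uniformly: on_wait releases the lock as the waiter blocks, on_notify wakes or requeues the waiter, and on_wakeup restores the recursion count on return. A minimal sketch, assuming the wait/notify_one routines condition_variable provides elsewhere in this header:

        #include <locks.hfa>

        single_acquisition_lock m;
        condition_variable( single_acquisition_lock ) cv;
        volatile bool ready = false;

        void consumer() {
                lock( m );
                while ( !ready ) wait( cv, m );   // on_wait drops m; on_wakeup retakes it
                unlock( m );
        }

        void producer() {
                lock( m );
                ready = true;
                notify_one( cv );                 // on_notify unparks or requeues one waiter
                unlock( m );
        }
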
  • libcfa/src/concurrency/monitor.cfa

    rcf444b6 r6ff08d8  
    55// file "LICENCE" distributed with Cforall.
    66//
    7 // $monitor.c --
     7// monitor.cfa --
    88//
    99// Author           : Thierry Delisle
     
    2828//-----------------------------------------------------------------------------
    2929// Forward declarations
    30 static inline void __set_owner ( $monitor * this, $thread * owner );
    31 static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
    32 static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
    33 static inline void reset_mask( $monitor * this );
    34 
    35 static inline $thread * next_thread( $monitor * this );
    36 static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
     30static inline void __set_owner ( monitor$ * this, thread$ * owner );
     31static inline void __set_owner ( monitor$ * storage [], __lock_size_t count, thread$ * owner );
     32static inline void set_mask  ( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
     33static inline void reset_mask( monitor$ * this );
     34
     35static inline thread$ * next_thread( monitor$ * this );
     36static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors );
    3737
    3838static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
    39 static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
     39static inline void lock_all  ( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
    4040static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
    41 static inline void unlock_all( $monitor * locks [], __lock_size_t count );
    42 
    43 static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
    44 static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    45 
    46 static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    47 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    48 
    49 static inline $thread *        check_condition   ( __condition_criterion_t * );
     41static inline void unlock_all( monitor$ * locks [], __lock_size_t count );
     42
     43static inline void save   ( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
     44static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
     45
     46static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     47static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     48
     49static inline thread$ *        check_condition   ( __condition_criterion_t * );
    5050static inline void                 brand_condition   ( condition & );
    51 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
     51static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count );
    5252
    5353forall(T & | sized( T ))
    5454static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
    5555static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
    56 static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
     56static inline __lock_size_t aggregate    ( monitor$ * storage [], const __waitfor_mask_t & mask );
    5757
    5858//-----------------------------------------------------------------------------
     
    6969
    7070#define monitor_ctx( mons, cnt )                                /* Define that create the necessary struct for internal/external scheduling operations */ \
    71         $monitor ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
     71        monitor$ ** monitors = mons;                          /* Save the targeted monitors                                                          */ \
    7272        __lock_size_t count = cnt;                                /* Save the count to a local variable                                                  */ \
    7373        unsigned int recursions[ count ];                         /* Save the current recursion levels to restore them later                             */ \
     
    8282// Enter/Leave routines
    8383// Enter single monitor
    84 static void __enter( $monitor * this, const __monitor_group_t & group ) {
    85         $thread * thrd = active_thread();
     84static void __enter( monitor$ * this, const __monitor_group_t & group ) {
     85        thread$ * thrd = active_thread();
    8686
    8787        // Lock the monitor spinlock
     
    141141}
    142142
    143 static void __dtor_enter( $monitor * this, fptr_t func, bool join ) {
    144         $thread * thrd = active_thread();
     143static void __dtor_enter( monitor$ * this, fptr_t func, bool join ) {
     144        thread$ * thrd = active_thread();
    145145        #if defined( __CFA_WITH_VERIFY__ )
    146146                bool is_thrd = this == &thrd->self_mon;
     
    173173        // because join will not release the monitor after it executed.
    174174        // to avoid that it sets the owner to the special value thrd | 1p before exiting
    175         else if( this->owner == ($thread*)(1 | (uintptr_t)thrd) ) {
     175        else if( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
    176176                // restore the owner and just return
    177177                __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
     
    191191
    192192        __lock_size_t count = 1;
    193         $monitor ** monitors = &this;
     193        monitor$ ** monitors = &this;
    194194        __monitor_group_t group = { &this, 1, func };
    195195        if( is_accepted( this, group) ) {
     
    243243
    244244// Leave single monitor
    245 void __leave( $monitor * this ) {
     245void __leave( monitor$ * this ) {
    246246        // Lock the monitor spinlock
    247247        lock( this->lock __cfaabi_dbg_ctx2 );
     
    263263
    264264        // Get the next thread, will be null on low contention monitor
    265         $thread * new_owner = next_thread( this );
     265        thread$ * new_owner = next_thread( this );
    266266
    267267        // Check the new owner is consistent with who we wake-up
     
    278278
    279279// Leave single monitor for the last time
    280 void __dtor_leave( $monitor * this, bool join ) {
     280void __dtor_leave( monitor$ * this, bool join ) {
    281281        __cfaabi_dbg_debug_do(
    282282                if( active_thread() != this->owner ) {
     
    288288        )
    289289
    290         this->owner = ($thread*)(1 | (uintptr_t)this->owner);
    291 }
    292 
    293 void __thread_finish( $thread * thrd ) {
    294         $monitor * this = &thrd->self_mon;
     290        this->owner = (thread$*)(1 | (uintptr_t)this->owner);
     291}
     292
     293void __thread_finish( thread$ * thrd ) {
     294        monitor$ * this = &thrd->self_mon;
    295295
    296296        // Lock the monitor now
     
    298298        /* paranoid */ verify( this->lock.lock );
    299299        /* paranoid */ verify( thrd->context.SP );
    300         /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
    301         /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
     300        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
     301        /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
    302302        /* paranoid */ verify( ! __preemption_enabled() );
    303303
     
    311311
    312312        // Fetch the next thread, can be null
    313         $thread * new_owner = next_thread( this );
     313        thread$ * new_owner = next_thread( this );
    314314
    315315        // Mark the state as fully halted
     
    336336// Leave multiple monitor
    337337// relies on the monitor array being sorted
    338 static inline void leave($monitor * monitors [], __lock_size_t count) {
     338static inline void leave(monitor$ * monitors [], __lock_size_t count) {
    339339        for( __lock_size_t i = count - 1; i >= 0; i--) {
    340340                __leave( monitors[i] );
     
    344344// Ctor for monitor guard
    345345// Sorts monitors before entering
    346 void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
    347         $thread * thrd = active_thread();
     346void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
     347        thread$ * thrd = active_thread();
    348348
    349349        // Store current array
     
    385385// Ctor for monitor guard
    386386// Sorts monitors before entering
    387 void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) {
     387void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
    388388        // optimization
    389         $thread * thrd = active_thread();
     389        thread$ * thrd = active_thread();
    390390
    391391        // Store current array
     
    415415//-----------------------------------------------------------------------------
    416416// Internal scheduling types
    417 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
     417void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    418418        this.waiting_thread = waiting_thread;
    419419        this.count = count;
     
    429429}
    430430
    431 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
     431void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
    432432        this.ready  = false;
    433433        this.target = target;
     
    463463        // Find the next thread(s) to run
    464464        __lock_size_t thread_count = 0;
    465         $thread * threads[ count ];
     465        thread$ * threads[ count ];
    466466        __builtin_memset( threads, 0, sizeof( threads ) );
    467467
     
    471471        // Remove any duplicate threads
    472472        for( __lock_size_t i = 0; i < count; i++) {
    473                 $thread * new_owner = next_thread( monitors[i] );
     473                thread$ * new_owner = next_thread( monitors[i] );
    474474                insert_unique( threads, thread_count, new_owner );
    475475        }
     
    501501        //Some more checking in debug
    502502        __cfaabi_dbg_debug_do(
    503                 $thread * this_thrd = active_thread();
     503                thread$ * this_thrd = active_thread();
    504504                if ( this.monitor_count != this_thrd->monitors.size ) {
    505505                        abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
     
    555555
    556556        //Find the thread to run
    557         $thread * signallee = pop_head( this.blocked )->waiting_thread;
     557        thread$ * signallee = pop_head( this.blocked )->waiting_thread;
    558558        __set_owner( monitors, count, signallee );
    559559
     
    608608        // Create one!
    609609        __lock_size_t max = count_max( mask );
    610         $monitor * mon_storage[max];
     610        monitor$ * mon_storage[max];
    611611        __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
    612612        __lock_size_t actual_count = aggregate( mon_storage, mask );
     
    626626        {
    627627                // Check if the entry queue
    628                 $thread * next; int index;
     628                thread$ * next; int index;
    629629                [next, index] = search_entry_queue( mask, monitors, count );
    630630
     
    636636                                verifyf( accepted.size == 1,  "ERROR: Accepted dtor has more than 1 mutex parameter." );
    637637
    638                                 $monitor * mon2dtor = accepted[0];
     638                                monitor$ * mon2dtor = accepted[0];
    639639                                verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
    640640
     
    730730// Utilities
    731731
    732 static inline void __set_owner( $monitor * this, $thread * owner ) {
     732static inline void __set_owner( monitor$ * this, thread$ * owner ) {
    733733        /* paranoid */ verify( this->lock.lock );
    734734
     
    740740}
    741741
    742 static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
     742static inline void __set_owner( monitor$ * monitors [], __lock_size_t count, thread$ * owner ) {
    743743        /* paranoid */ verify ( monitors[0]->lock.lock );
    744744        /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
     
    753753}
    754754
    755 static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
     755static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
    756756        for( __lock_size_t i = 0; i < count; i++) {
    757757                storage[i]->mask = mask;
     
    759759}
    760760
    761 static inline void reset_mask( $monitor * this ) {
     761static inline void reset_mask( monitor$ * this ) {
    762762        this->mask.accepted = 0p;
    763763        this->mask.data = 0p;
     
    765765}
    766766
    767 static inline $thread * next_thread( $monitor * this ) {
     767static inline thread$ * next_thread( monitor$ * this ) {
    768768        //Check the signaller stack
    769769        __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
     
    781781        // No signaller thread
    782782        // Get the next thread in the entry_queue
    783         $thread * new_owner = pop_head( this->entry_queue );
     783        thread$ * new_owner = pop_head( this->entry_queue );
    784784        /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
    785785        /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
     
    789789}
    790790
    791 static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
     791static inline bool is_accepted( monitor$ * this, const __monitor_group_t & group ) {
    792792        __acceptable_t * it = this->mask.data; // Optim
    793793        __lock_size_t count = this->mask.size;
     
    811811}
    812812
    813 static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     813static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    814814        for( __lock_size_t i = 0; i < count; i++) {
    815815                (criteria[i]){ monitors[i], waiter };
     
    819819}
    820820
    821 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
     821static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
    822822        for( __lock_size_t i = 0; i < count; i++) {
    823823                (criteria[i]){ monitors[i], waiter };
     
    835835}
    836836
    837 static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
     837static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {
    838838        for( __lock_size_t i = 0; i < count; i++ ) {
    839839                __spinlock_t * l = &source[i]->lock;
     
    849849}
    850850
    851 static inline void unlock_all( $monitor * locks [], __lock_size_t count ) {
     851static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) {
    852852        for( __lock_size_t i = 0; i < count; i++ ) {
    853853                unlock( locks[i]->lock );
     
    856856
    857857static inline void save(
    858         $monitor * ctx [],
     858        monitor$ * ctx [],
    859859        __lock_size_t count,
    860860        __attribute((unused)) __spinlock_t * locks [],
     
    869869
    870870static inline void restore(
    871         $monitor * ctx [],
     871        monitor$ * ctx [],
    872872        __lock_size_t count,
    873873        __spinlock_t * locks [],
     
    887887// 2 - Checks if all the monitors are ready to run
    888888//     if so return the thread to run
    889 static inline $thread * check_condition( __condition_criterion_t * target ) {
     889static inline thread$ * check_condition( __condition_criterion_t * target ) {
    890890        __condition_node_t * node = target->owner;
    891891        unsigned short count = node->count;
     
    910910
    911911static inline void brand_condition( condition & this ) {
    912         $thread * thrd = active_thread();
     912        thread$ * thrd = active_thread();
    913913        if( !this.monitors ) {
    914914                // __cfaabi_dbg_print_safe( "Branding\n" );
     
    916916                this.monitor_count = thrd->monitors.size;
    917917
    918                 this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) );
     918                this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) );
    919919                for( int i = 0; i < this.monitor_count; i++ ) {
    920920                        this.monitors[i] = thrd->monitors[i];
     
    923923}
    924924
    925 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) {
    926 
    927         __queue_t($thread) & entry_queue = monitors[0]->entry_queue;
     925static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor$ * monitors [], __lock_size_t count ) {
     926
     927        __queue_t(thread$) & entry_queue = monitors[0]->entry_queue;
    928928
    929929        // For each thread in the entry-queue
    930         for(    $thread ** thrd_it = &entry_queue.head;
     930        for(    thread$ ** thrd_it = &entry_queue.head;
    931931                (*thrd_it) != 1p;
    932932                thrd_it = &(*thrd_it)->link.next
     
    972972}
    973973
    974 static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) {
     974static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) {
    975975        __lock_size_t size = 0;
    976976        for( __lock_size_t i = 0; i < mask.size; i++ ) {
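
Everything in this file sits behind CFA's mutex parameter qualifier: __enter/__leave bracket each call to a mutex routine, next_thread picks the successor from the signaller stack or the entry queue, and the guards sort multi-monitor acquisitions before entry. For orientation, a sketch of the user-facing feature this machinery implements, in standard CFA (not part of this changeset):

        monitor Account {
                int balance;
        };

        void deposit( Account & mutex acc, int amount ) {
                // monitor_guard_t runs __enter before this body and __leave after it
                acc.balance += amount;
        }

        void transfer( Account & mutex from, Account & mutex to, int amount ) {
                // both monitors are acquired together; the guard sorts them first
                // ("Sorts monitors before entering" above), so transfers cannot deadlock
                from.balance -= amount;
                to.balance   += amount;
        }
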
  • libcfa/src/concurrency/monitor.hfa

    rcf444b6 r6ff08d8  
    2323
    2424trait is_monitor(T &) {
    25         $monitor * get_monitor( T & );
     25        monitor$ * get_monitor( T & );
    2626        void ^?{}( T & mutex );
    2727};
    2828
    29 static inline void ?{}($monitor & this) with( this ) {
     29static inline void ?{}(monitor$ & this) with( this ) {
    3030        lock{};
    3131        entry_queue{};
     
    3939}
    4040
    41 static inline void ^?{}($monitor & ) {}
     41static inline void ^?{}(monitor$ & ) {}
    4242
    4343struct monitor_guard_t {
    44         $monitor **     m;
     44        monitor$ **     m;
    4545        __lock_size_t           count;
    4646        __monitor_group_t prev;
    4747};
    4848
    49 void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() );
     49void ?{}( monitor_guard_t & this, monitor$ ** m, __lock_size_t count, void (*func)() );
    5050void ^?{}( monitor_guard_t & this );
    5151
    5252struct monitor_dtor_guard_t {
    53         $monitor *    m;
     53        monitor$ *    m;
    5454        __monitor_group_t prev;
    5555        bool join;
    5656};
    5757
    58 void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)(), bool join );
     58void ?{}( monitor_dtor_guard_t & this, monitor$ ** m, void (*func)(), bool join );
    5959void ^?{}( monitor_dtor_guard_t & this );
    6060
     
    7373
    7474        // The monitor this criterion concerns
    75         $monitor * target;
     75        monitor$ * target;
    7676
    7777        // The parent node to which this criterion belongs
     
    8888struct __condition_node_t {
    8989        // Thread that needs to be woken when all criteria are met
    90         $thread * waiting_thread;
     90        thread$ * waiting_thread;
    9191
    9292        // Array of criteria (Criterions are contiguous in memory)
     
    107107}
    108108
    109 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info );
     109void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
    110110void ?{}(__condition_criterion_t & this );
    111 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner );
     111void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
    112112
    113113struct condition {
     
    116116
    117117        // Array of monitor pointers (Monitors are NOT contiguous in memory)
    118         $monitor ** monitors;
     118        monitor$ ** monitors;
    119119
    120120        // Number of monitors in the array
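
The condition type above carries its own FIFO of __condition_node_t waiters plus the monitor set it is branded with on first use (brand_condition in monitor.cfa). A sketch of the internal-scheduling idiom it supports, on a hypothetical single-slot Mailbox; because next_thread in monitor.cfa prefers the signaller stack over the entry queue, the signalled thread regains the monitor before new callers, which is why a single if test is conventional here:

        monitor Mailbox {
                condition not_empty;
                int elem;
                bool full;
        };

        void put( Mailbox & mutex mb, int v ) {
                // overwrites any unconsumed element; a real buffer would also
                // wait on a not_full condition
                mb.elem = v;  mb.full = true;
                signal( mb.not_empty );               // moves one waiter to the signal stack
        }

        int take( Mailbox & mutex mb ) {
                if ( !mb.full ) wait( mb.not_empty ); // blocks and releases mb atomically
                mb.full = false;
                return mb.elem;
        }
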
  • libcfa/src/concurrency/mutex.cfa

    rcf444b6 r6ff08d8  
    122122        recursion_count--;
    123123        if( recursion_count == 0 ) {
    124                 $thread * thrd = pop_head( blocked_threads );
     124                thread$ * thrd = pop_head( blocked_threads );
    125125                owner = thrd;
    126126                recursion_count = (thrd ? 1 : 0);
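
The release path above hands the lock directly to the head of blocked_threads: the popped thread$ becomes owner, at recursion depth 1, before it ever runs, so a woken thread never has to race for the lock. The wake-up call itself falls outside this hunk; a self-contained rendition of the idiom, on a hypothetical toy type whose fields mirror the structures in mutex.hfa below:

        struct toy_recursive_lock {
                __queue_t(struct thread$) blocked_threads;
                struct thread$ * owner;
                size_t recursion_count;
        };

        static void release( toy_recursive_lock & this ) with( this ) {
                recursion_count--;
                if( recursion_count == 0 ) {
                        thread$ * thrd = pop_head( blocked_threads );  // 0p when no waiters
                        owner = thrd;                                  // next owner fixed before it runs
                        recursion_count = (thrd ? 1 : 0);
                        if( thrd ) unpark( thrd );                     // wake the new owner
                }
        }
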
  • libcfa/src/concurrency/mutex.hfa

    rcf444b6 r6ff08d8  
    3636
    3737        // List of blocked threads
    38         __queue_t(struct $thread) blocked_threads;
     38        __queue_t(struct thread$) blocked_threads;
    3939
    4040        // Locked flag
     
    5555
    5656        // List of blocked threads
    57         __queue_t(struct $thread) blocked_threads;
     57        __queue_t(struct thread$) blocked_threads;
    5858
    5959        // Current thread owning the lock
    60         struct $thread * owner;
     60        struct thread$ * owner;
    6161
    6262        // Number of recursion level
     
    8383
    8484        // List of blocked threads
    85         __queue_t(struct $thread) blocked_threads;
     85        __queue_t(struct thread$) blocked_threads;
    8686};
    8787
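
All three structures in this header park their waiters on an intrusive __queue_t(struct thread$), so blocking allocates nothing. The acquire-side counterpart of the hand-off sketched under mutex.cfa above, again hypothetical and reusing the toy_recursive_lock from that sketch:

        static void block_on( toy_recursive_lock & this ) with( this ) {
                append( blocked_threads, active_thread() );  // FIFO enqueue via the intrusive link
                // ... the protecting spinlock (not shown) is released here, then:
                park();                                      // sleep until release() unparks us
                // on return the lock is already ours: release() set owner before waking us
        }
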
  • libcfa/src/concurrency/preemption.cfa

    rcf444b6 r6ff08d8  
    6161// FwdDeclarations : timeout handlers
    6262static void preempt( processor   * this );
    63 static void timeout( $thread * this );
     63static void timeout( thread$ * this );
    6464
    6565// FwdDeclarations : Signal handlers
     
    420420
    421421// reserved for future use
    422 static void timeout( $thread * this ) {
     422static void timeout( thread$ * this ) {
    423423        unpark( this );
    424424}
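
timeout above is the alarm-side half of thread sleeping: whichever thread parked waiting on the alarm is simply unpark()ed when it fires. The same two primitives compose a hand-rolled rendezvous; a sketch, safe against ordering races because (per the comment in thread.hfa further below) unpark on a not-yet-parked thread makes its park return immediately:

        thread$ * volatile waiter = 0p;

        void wait_side() {
                __atomic_store_n( &waiter, active_thread(), __ATOMIC_SEQ_CST );
                park();                    // returns immediately if already unpark()ed
        }

        void wake_side() {
                thread$ * t = __atomic_exchange_n( &waiter, 0p, __ATOMIC_SEQ_CST );
                if( t ) unpark( t );       // schedule (or pre-arm) the waiter
        }
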
  • libcfa/src/concurrency/ready_queue.cfa

    rcf444b6 r6ff08d8  
    6767#endif
    6868
    69 static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
    70 static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
    71 static inline struct $thread * search(struct cluster * cltr);
     69static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
     70static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
     71static inline struct thread$ * search(struct cluster * cltr);
    7272static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
    7373
     
    274274//-----------------------------------------------------------------------
    275275#if defined(USE_CPU_WORK_STEALING)
    276         __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
     276        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
    277277                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    278278
     
    316316
     317317        // Pop from the ready queue of a given cluster
    318         __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     318        __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    319319                /* paranoid */ verify( lanes.count > 0 );
    320320                /* paranoid */ verify( kernelTLS().this_processor );
     
    371371                                proc->rdq.target = -1u;
    372372                                if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
    373                                         $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
     373                                        thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    374374                                        proc->rdq.last = target;
    375375                                        if(t) return t;
     
    379379                        unsigned last = proc->rdq.last;
    380380                        if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {
    381                                 $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
     381                                thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
    382382                                if(t) return t;
    383383                        }
     
    389389                for(READYQ_SHARD_FACTOR) {
    390390                        unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    391                         if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     391                        if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    392392                }
    393393
     
    396396        }
    397397
    398         __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
     398        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    399399                processor * const proc = kernelTLS().this_processor;
    400400                unsigned last = proc->rdq.last;
    401401                if(last != -1u) {
    402                         struct $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
     402                        struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
    403403                        if(t) return t;
    404404                        proc->rdq.last = -1u;
     
    408408                return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    409409        }
    410         __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
     410        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    411411                return search(cltr);
    412412        }
     
    435435        }
    436436
    437         __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
     437        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
    438438                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    439439
     
    482482
     483483        // Pop from the ready queue of a given cluster
    484         __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     484        __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    485485                /* paranoid */ verify( lanes.count > 0 );
    486486                /* paranoid */ verify( kernelTLS().this_processor );
     
    506506
    507507                        // try popping from the 2 picked lists
    508                         struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
     508                        struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
    509509                        if(thrd) {
    510510                                return thrd;
     
    516516        }
    517517
    518         __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
    519         __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
     518        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
     519        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
    520520                return search(cltr);
    521521        }
    522522#endif
    523523#if defined(USE_WORK_STEALING)
    524         __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
     524        __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
    525525                __cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
    526526
     
    576576
     577577        // Pop from the ready queue of a given cluster
    578         __attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
     578        __attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
    579579                /* paranoid */ verify( lanes.count > 0 );
    580580                /* paranoid */ verify( kernelTLS().this_processor );
     
    598598                        const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
    599599                        if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
    600                                 $thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
     600                                thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
    601601                                if(t) return t;
    602602                        }
     
    605605                for(READYQ_SHARD_FACTOR) {
    606606                        unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
    607                         if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
     607                        if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
    608608                }
    609609                return 0p;
    610610        }
    611611
    612         __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
     612        __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
    613613                unsigned i = __tls_rand() % lanes.count;
    614614                return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
    615615        }
    616616
    617         __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
     617        __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
    618618                return search(cltr);
    619619        }
     
    628628//-----------------------------------------------------------------------
    629629// try to pop from a lane given by index w
    630 static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     630static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
    631631        __STATS( stats.attempt++; )
    632632
     
    651651
    652652        // Actually pop the list
    653         struct $thread * thrd;
     653        struct thread$ * thrd;
    654654        unsigned long long tsv;
    655655        [thrd, tsv] = pop(lane);
     
     678678// try to pop from any lanes making sure you don't miss any threads pushed
    679679// before the start of the function
    680 static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
     680static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) {
    681681        /* paranoid */ verify( lanes.count > 0 );
    682682        unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
     
    684684        for(i; count) {
    685685                unsigned idx = (offset + i) % count;
    686                 struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
     686                struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
    687687                if(thrd) {
    688688                        return thrd;
     
    719719//-----------------------------------------------------------------------
     720720// Given 2 indexes, pick the list with the oldest push and try to pop from it
    721 static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     721static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
     722722        // Pick the best list
    723723        int w = i;
     
    854854                                // As long as we can pop from this lane to push the threads somewhere else in the queue
    855855                                while(!is_empty(lanes.data[idx])) {
    856                                         struct $thread * thrd;
     856                                        struct thread$ * thrd;
    857857                                        unsigned long long _;
    858858                                        [thrd, _] = pop(lanes.data[idx]);
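
A recurring shape in this file: pop(lane) uses CFA's multiple-return syntax to yield both the dequeued thread$ and the timestamp recorded at push, and the stealing heuristics above compare such timestamps (lanes.tscs[...].tv) against a cutoff before paying for a remote pop. The call pattern in isolation, as a hypothetical helper (per ready_subqueue.hfa the lane must be locked and non-empty):

        static inline thread$ * locked_pop( __intrusive_lane_t & lane ) {
                thread$ * thrd;
                unsigned long long tsv;      // push timestamp, travels with the thread
                [thrd, tsv] = pop( lane );   // tuple assignment, as in the hunks above
                return thrd;
        }
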
  • libcfa/src/concurrency/ready_subqueue.hfa

    rcf444b6 r6ff08d8  
     77// Intrusive lanes which are used by the relaxed ready queue
    88struct __attribute__((aligned(128))) __intrusive_lane_t {
    9         struct $thread * prev;
     9        struct thread$ * prev;
    1010
    1111        // spin lock protecting the queue
     
    2020
    2121// Get the head pointer (one before the first element) from the anchor
    22 static inline $thread * mock_head(const __intrusive_lane_t & this) {
    23         $thread * rhead = ($thread *)(
    24                 (uintptr_t)( &this.anchor ) - __builtin_offsetof( $thread, link )
     22static inline thread$ * mock_head(const __intrusive_lane_t & this) {
     23        thread$ * rhead = (thread$ *)(
     24                (uintptr_t)( &this.anchor ) - __builtin_offsetof( thread$, link )
    2525        );
    2626        return rhead;
     
    3838
    3939        // We add a boat-load of assertions here because the anchor code is very fragile
    40         /* paranoid */ _Static_assert( offsetof( $thread, link ) == offsetof(__intrusive_lane_t, anchor) );
    41         /* paranoid */ verify( offsetof( $thread, link ) == offsetof(__intrusive_lane_t, anchor) );
    42         /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.anchor) );
     40        /* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
     41        /* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
     42        /* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
    4343        /* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
    4444        /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
     
    6161// Push a thread onto this lane
     6262// the lane lock must be held by the caller (see the paranoid checks below)
    63 static inline void push( __intrusive_lane_t & this, $thread * node ) {
     63static inline void push( __intrusive_lane_t & this, thread$ * node ) {
    6464        /* paranoid */ verify( this.lock );
    6565        /* paranoid */ verify( node->link.next == 0p );
     
     9191// Pop a thread from this lane
     9292// returns the popped thread and the timestamp recorded when it was pushed
    93 static inline [* $thread, unsigned long long] pop( __intrusive_lane_t & this ) {
     93static inline [* thread$, unsigned long long] pop( __intrusive_lane_t & this ) {
    9494        /* paranoid */ verify( this.lock );
    9595        /* paranoid */ verify( this.anchor.next != 0p );
     
    9999        // Get the relevant nodes locally
    100100        unsigned long long ts = this.anchor.ts;
    101         $thread * node = this.anchor.next;
     101        thread$ * node = this.anchor.next;
    102102        this.anchor.next = node->link.next;
    103103        this.anchor.ts   = node->link.ts;
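
mock_head above runs the usual container_of computation in reverse: it fabricates a thread$ pointer whose link member lands exactly on the lane's anchor, so push and pop can treat the anchor as a sentinel node without materialising a whole thread$. The _Static_assert/verify pairs pin down the layout this depends on. The same arithmetic on a toy type, as a plain-C-style sketch with hypothetical names:

        #include <stddef.h>
        #include <stdint.h>

        struct toy_link { struct toy_link * next; };
        struct toy_node { int payload; struct toy_link link; };
        struct toy_list { struct toy_link anchor; };

        // Synthesize the node address whose .link field would sit at &list->anchor.
        static inline struct toy_node * toy_sentinel( struct toy_list * list ) {
                return (struct toy_node *)
                        ( (uintptr_t)&list->anchor - offsetof( struct toy_node, link ) );
        }
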
  • libcfa/src/concurrency/thread.cfa

    rcf444b6 r6ff08d8  
    2727//-----------------------------------------------------------------------------
    2828// Thread ctors and dtors
    29 void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
     29void ?{}(thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
    3030        context{ 0p, 0p };
    3131        self_cor{ name, storage, storageSize };
     
    5757}
    5858
    59 void ^?{}($thread& this) with( this ) {
     59void ^?{}(thread$& this) with( this ) {
    6060        #if defined( __CFA_WITH_VERIFY__ )
    6161                canary = 0xDEADDEADDEADDEADp;
     
    8787void ?{}( thread_dtor_guard_t & this,
    8888                T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
    89         $monitor * m = get_monitor(thrd);
    90         $thread * desc = get_thread(thrd);
     89        monitor$ * m = get_monitor(thrd);
     90        thread$ * desc = get_thread(thrd);
    9191
    9292        // Setup the monitor guard
     
    130130forall( T & | is_thread(T) )
    131131void __thrd_start( T & this, void (*main_p)(T &) ) {
    132         $thread * this_thrd = get_thread(this);
     132        thread$ * this_thrd = get_thread(this);
    133133
    134134        disable_interrupts();
  • libcfa/src/concurrency/thread.hfa

    rcf444b6 r6ff08d8  
    2929        void ^?{}(T& mutex this);
    3030        void main(T& this);
    31         $thread* get_thread(T& this);
     31        thread$ * get_thread(T& this);
    3232};
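
Types declared with CFA's thread keyword satisfy this trait automatically: the translator generates get_thread and the mutex destructor, and user code supplies only main. For reference, the standard usage the trait underpins (ordinary CFA, not part of this changeset):

        #include <thread.hfa>
        #include <fstream.hfa>

        thread Worker {};                 // satisfies is_thread implicitly

        void main( Worker & this ) {      // the required thread body
                sout | "hello from a worker";
        }

        int main() {
                Worker w;                 // starts running once constructed
                // leaving the block joins: the generated ^?{} waits for main() to finish
        }
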
    3333
     
    4545// Inline getters for threads/coroutines/monitors
    4646forall( T & | is_thread(T) )
    47 static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
     47static inline coroutine$ * get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
    4848
    4949forall( T & | is_thread(T) )
    50 static inline $monitor  * get_monitor  (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
     50static inline monitor$   * get_monitor  (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
    5151
    52 static inline $coroutine* get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; }
    53 static inline $monitor  * get_monitor  ($thread * this) __attribute__((const)) { return &this->self_mon; }
     52static inline coroutine$ * get_coroutine(thread$ * this) __attribute__((const)) { return &this->self_cor; }
     53static inline monitor$   * get_monitor  (thread$ * this) __attribute__((const)) { return &this->self_mon; }
    5454
    5555//-----------------------------------------------------------------------------
     
    6262//-----------------------------------------------------------------------------
    6363// Ctors and dtors
    64 void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
    65 void ^?{}($thread & this);
     64void ?{}(thread$ & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
     65void ^?{}(thread$ & this);
    6666
    67 static inline void ?{}($thread & this)                                                                  { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
    68 static inline void ?{}($thread & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
    69 static inline void ?{}($thread & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
    70 static inline void ?{}($thread & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, 0p, 65000 }; }
    71 static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0p, stackSize }; }
    72 static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
    73 static inline void ?{}($thread & this, const char * const name)                                         { this{ name, *mainCluster, 0p, 65000 }; }
    74 static inline void ?{}($thread & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
    75 static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
     67static inline void ?{}(thread$ & this)                                                                  { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
     68static inline void ?{}(thread$ & this, size_t stackSize )                                               { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
     69static inline void ?{}(thread$ & this, void * storage, size_t storageSize )                             { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
     70static inline void ?{}(thread$ & this, struct cluster & cl )                                            { this{ "Anonymous Thread", cl, 0p, 65000 }; }
     71static inline void ?{}(thread$ & this, struct cluster & cl, size_t stackSize )                          { this{ "Anonymous Thread", cl, 0p, stackSize }; }
     72static inline void ?{}(thread$ & this, struct cluster & cl, void * storage, size_t storageSize )        { this{ "Anonymous Thread", cl, storage, storageSize }; }
     73static inline void ?{}(thread$ & this, const char * const name)                                         { this{ name, *mainCluster, 0p, 65000 }; }
     74static inline void ?{}(thread$ & this, const char * const name, struct cluster & cl )                   { this{ name, cl, 0p, 65000 }; }
     75static inline void ?{}(thread$ & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
    7676
    7777struct thread_dtor_guard_t {
     
     111111// Unpark a thread: if the thread is already blocked, schedule it;
     112112//                  if the thread is not yet blocked, signal that it should rerun immediately
    113 void unpark( $thread * this );
     113void unpark( thread$ * this );
    114114
    115115forall( T & | is_thread(T) )