Timestamp:
Jun 3, 2022, 3:10:01 PM
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
Children:
7affcda
Parents:
bf0263c (diff), fc134a48 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

Location:
libcfa/src/concurrency
Files:
24 edited

Legend:

Lines prefixed with "-" were removed, lines prefixed with "+" were added; unprefixed lines are unchanged context.
  • libcfa/src/concurrency/alarm.cfa

    rbf0263c → r90a8125

     //=============================================================================================
    -void sleep( Duration duration ) {
    +void sleep( Duration duration ) libcfa_public {
     	alarm_node_t node = { active_thread(), duration, 0`s };
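
Most hunks in this merge tag definitions with libcfa_public so that they stay exported when the library is built with hidden symbol visibility. The macro itself is not part of this diff; judging from the visibility("default") attributes and the #pragma GCC visibility push(default) lines added elsewhere in the changeset, it presumably expands to a GCC visibility attribute, roughly as in this illustrative sketch (not the actual libcfa header):

        /* Hypothetical sketch of a libcfa_public-style export macro for a
         * library compiled with -fvisibility=hidden. */
        #if defined(__GNUC__)
                #define libcfa_public __attribute__((visibility("default")))  /* exported from the .so */
        #else
                #define libcfa_public                                         /* no-op elsewhere */
        #endif

        libcfa_public void api_sleep(void) {}   /* exported: part of the public API (name is illustrative) */
        static void internal_helper(void) {}    /* internal linkage: never exported */

Marking the public entry points explicitly lets everything else default to hidden, which shrinks the dynamic symbol table and avoids accidental interposition of internal helpers.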
  • libcfa/src/concurrency/clib/cfathread.cfa

    rbf0263c → r90a8125

     typedef ThreadCancelled(cfathread_object) cfathread_exception;
    -typedef ThreadCancelled_vtable(cfathread_object) cfathread_vtable;
    +typedef vtable(ThreadCancelled(cfathread_object)) cfathread_vtable;
     void defaultResumptionHandler(ThreadCancelled(cfathread_object) & except) {
    ...
     typedef ThreadCancelled(__cfainit) __cfainit_exception;
    -typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;
    +typedef vtable(ThreadCancelled(__cfainit)) __cfainit_vtable;
     void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) {
    ...
     }
    +#pragma GCC visibility push(default)
     //================================================================================
     // Main Api
     extern "C" {
    -        int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
    +        int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
                     *cl = new();
                     return 0;
             }
    -        cfathread_cluster_t cfathread_cluster_self(void) {
    +        cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
                     return active_cluster();
             }
    -        int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
    +        int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
                     #if !defined(__CFA_NO_STATISTICS__)
                             print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
  • libcfa/src/concurrency/coroutine.cfa

    rbf0263c → r90a8125

     //-----------------------------------------------------------------------------
     forall(T &)
    -void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
    +void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
             dst->virtual_table = src->virtual_table;
             dst->the_coroutine = src->the_coroutine;
    ...
     forall(T &)
    -const char * msg(CoroutineCancelled(T) *) {
    +const char * msg(CoroutineCancelled(T) *) libcfa_public {
             return "CoroutineCancelled(...)";
     }
    ...
     forall(T & | is_coroutine(T))
     void __cfaehm_cancelled_coroutine(
    -                T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
    +                T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public {
             verify( desc->cancellation );
             desc->state = Cancelled;
    ...
     void __stack_prepare( __stack_info_t * this, size_t create_size );
    -void __stack_clean  ( __stack_info_t * this );
    +static void __stack_clean  ( __stack_info_t * this );
     //-----------------------------------------------------------------------------
    ...
     }
    -void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
    +void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
             (this.context){0p, 0p};
             (this.stack){storage, storageSize};
    ...
     }
    -void ^?{}(coroutine$& this) {
    +void ^?{}(coroutine$& this) libcfa_public {
             if(this.state != Halted && this.state != Start && this.state != Primed) {
                     coroutine$ * src = active_coroutine();
    ...
     // Part of the Public API
     // Not inline since only ever called once per coroutine
    -forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
    -void prime(T& cor) {
    +forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
    +void prime(T& cor) libcfa_public {
             coroutine$* this = get_coroutine(cor);
             assert(this->state == Start);
    ...
     }
    -[void *, size_t] __stack_alloc( size_t storageSize ) {
    +static [void *, size_t] __stack_alloc( size_t storageSize ) {
             const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
             assert(__page_size != 0l);
    ...
     }
    -void __stack_clean  ( __stack_info_t * this ) {
    +static void __stack_clean  ( __stack_info_t * this ) {
             void * storage = this->storage->limit;
    ...
     }
    -void __stack_prepare( __stack_info_t * this, size_t create_size ) {
    +void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
             const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
             bool userStack;
  • libcfa/src/concurrency/coroutine.hfa

    rbf0263c → r90a8125

     //-----------------------------------------------------------------------------
     // Exception thrown from resume when a coroutine stack is cancelled.
    -EHM_FORALL_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) (
    +forall(coroutine_t &)
    +exception CoroutineCancelled {
             coroutine_t * the_coroutine;
             exception_t * the_exception;
    -);
    +};
     forall(T &)
    ...
     // Anything that implements this trait can be resumed.
     // Anything that is resumed is a coroutine.
    -trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled, (T))) {
    +trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled(T))) {
             void main(T & this);
             coroutine$ * get_coroutine(T & this);
    ...
     //-----------------------------------------------------------------------------
     // Public coroutine API
    -forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
    +forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
     void prime(T & cor);
    ...
     extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
    -extern void __stack_clean  ( __stack_info_t * this );
     // Suspend implementation inlined for performance
    ...
     forall(T & | is_coroutine(T))
     void __cfaehm_cancelled_coroutine(
    -        T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );
    +        T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) );
     // Resume implementation inlined for performance
    -forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
    +forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
     static inline T & resume(T & cor) {
             // optimization : read TLS once and reuse it
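
coroutine.hfa (and thread.hfa further down) drop the EHM_FORALL_EXCEPTION macro in favour of the dedicated forall/exception declaration syntax, and the two-token assertion spelling CoroutineCancelled, (T) becomes the ordinary polymorphic-type spelling CoroutineCancelled(T). A minimal sketch of the new spellings, using a hypothetical exception Oops and client type widget (names are illustrative, not from the changeset):

        // A polymorphic exception in the keyword syntax adopted by this changeset.
        forall(T &)
        exception Oops {
                T * offender;           // payload: the object that triggered the error
        };

        struct widget {};               // some client type (illustrative)

        // The matching virtual-table type is now written vtable(Oops(widget))
        // rather than Oops_vtable(widget), mirroring the cfathread.cfa hunks above.
        typedef vtable(Oops(widget)) widget_oops_vtable;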
  • libcfa/src/concurrency/exception.cfa

    rbf0263c → r90a8125

     extern "C" {
    -struct exception_context_t * this_exception_context(void) {
    +struct exception_context_t * this_exception_context(void) libcfa_public {
             return &__get_stack( active_coroutine() )->exception_context;
     }
    -_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
    +_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
             _Unwind_Stop_Fn stop_func;
             void * stop_param;
  • libcfa/src/concurrency/invoke.c

    rbf0263c → r90a8125

     extern void enable_interrupts( _Bool poll );
    -void __cfactx_invoke_coroutine(
    +libcfa_public void __cfactx_invoke_coroutine(
             void (*main)(void *),
             void *this
    ...
     }
    -void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
    +libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
     void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
             _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
    ...
     }
    -void __cfactx_invoke_thread(
    +libcfa_public void __cfactx_invoke_thread(
             void (*main)(void *),
             void *this
    ...
     }
    -void __cfactx_start(
    +libcfa_public void __cfactx_start(
             void (*main)(void *),
             struct coroutine$ * cor,
  • libcfa/src/concurrency/io.cfa

    rbf0263c → r90a8125

                             const unsigned long long ctsc = rdtscl();
    -                        if(proc->io.target == MAX) {
    +                        if(proc->io.target == UINT_MAX) {
                                     uint64_t chaos = __tls_rand();
                                     unsigned ext = chaos & 0xff;
    ...
                             else {
                                     const unsigned target = proc->io.target;
    -                                /* paranoid */ verify( io.tscs[target].tv != MAX );
    +                                /* paranoid */ verify( io.tscs[target].tv != ULLONG_MAX );
                                     HELP: if(target < ctxs_count) {
                                             const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
    ...
                                             __STATS__( true, io.calls.helped++; )
                                     }
    -                                proc->io.target = MAX;
    +                                proc->io.target = UINT_MAX;
                             }
                     }
    ...
             // for convenience, return both the index and the pointer to the sqe
             // sqe == &sqes[idx]
    -        struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
    +        struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
                     // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
    ...
             }
    -        void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
    +        void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
                     // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
  • libcfa/src/concurrency/io/call.cfa.in

    rbf0263c → r90a8125

     // I/O Interface
     //=============================================================================================
    +#pragma GCC visibility push(default)
     """
  • libcfa/src/concurrency/io/setup.cfa

    rbf0263c → r90a8125

     #if !defined(CFA_HAVE_LINUX_IO_URING_H)
    -        void ?{}(io_context_params & this) {}
    +        void ?{}(io_context_params & this) libcfa_public {}
             void  ?{}($io_context & this, struct cluster & cl) {}
    ...
     #pragma GCC diagnostic pop
    -        void ?{}(io_context_params & this) {
    +        void ?{}(io_context_params & this) libcfa_public {
                     this.num_entries = 256;
             }
  • libcfa/src/concurrency/io/types.hfa

    rbf0263c → r90a8125

     #pragma once
    +#include <limits.h>
     extern "C" {
             #include <linux/types.h>
    ...
     #include "iofwd.hfa"
     #include "kernel/fwd.hfa"
    -#include "limits.hfa"
     #if defined(CFA_HAVE_LINUX_IO_URING_H)
    ...
                     const __u32 tail = *this->cq.tail;
    -                if(head == tail) return MAX;
    +                if(head == tail) return ULLONG_MAX;
                     return this->cq.ts;
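
Several files in this merge stop using the runtime's own limits.hfa and its generic MAX constant and instead pull the sentinels from the standard <limits.h>, spelling them with the type they actually carry: UINT_MAX for the unsigned helping-target indices and ULLONG_MAX for 64-bit timestamps. A small stand-alone sketch of the sentinel pattern with the standard constants (struct, field, and function names are illustrative):

        #include <limits.h>

        struct shard_state {
                unsigned target;                 /* UINT_MAX   => no helping target chosen   */
                unsigned long long last_ts;      /* ULLONG_MAX => queue empty / no timestamp */
        };

        static void reset(struct shard_state * s) {
                s->target  = UINT_MAX;
                s->last_ts = ULLONG_MAX;
        }

        static _Bool has_target(const struct shard_state * s) {
                return s->target != UINT_MAX;
        }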
  • libcfa/src/concurrency/kernel.cfa

    rbf0263c → r90a8125

     // KERNEL_ONLY
    -void returnToKernel() {
    +static void returnToKernel() {
             /* paranoid */ verify( ! __preemption_enabled() );
             coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
    ...
     }
    -void unpark( thread$ * thrd, unpark_hint hint ) {
    +void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
             if( !thrd ) return;
    ...
     }
    -void park( void ) {
    +void park( void ) libcfa_public {
             __disable_interrupts_checked();
             /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
    ...
     // KERNEL ONLY
    -bool force_yield( __Preemption_Reason reason ) {
    +bool force_yield( __Preemption_Reason reason ) libcfa_public {
             __disable_interrupts_checked();
             thread$ * thrd = kernelTLS().this_thread;
    ...
     //-----------------------------------------------------------------------------
     // Debug
    -bool threading_enabled(void) __attribute__((const)) {
    +bool threading_enabled(void) __attribute__((const)) libcfa_public {
             return true;
     }
    ...
     // Statistics
     #if !defined(__CFA_NO_STATISTICS__)
    -        void print_halts( processor & this ) {
    +        void print_halts( processor & this ) libcfa_public {
                     this.print_halts = true;
             }
    ...
             }
    -        void crawl_cluster_stats( cluster & this ) {
    +        static void crawl_cluster_stats( cluster & this ) {
                     // Stop the world, otherwise stats could get really messed-up
                     // this doesn't solve all problems but does solve many
    ...
    -        void print_stats_now( cluster & this, int flags ) {
    +        void print_stats_now( cluster & this, int flags ) libcfa_public {
                     crawl_cluster_stats( this );
                     __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
  • libcfa/src/concurrency/kernel.hfa

    rbf0263c → r90a8125

     // Coroutine used py processors for the 2-step context switch
    -coroutine processorCtx_t {
    +struct processorCtx_t {
    +        struct coroutine$ self;
             struct processor * proc;
     };
  • libcfa/src/concurrency/kernel/cluster.cfa

    rbf0263c → r90a8125

     // returns the maximum number of processors the RWLock support
    -__attribute__((weak)) unsigned __max_processors() {
    +__attribute__((weak)) unsigned __max_processors() libcfa_public {
             const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
             if(!max_cores_s) {
    ...
                                             if(is_empty(sl)) {
                                                     assert( sl.anchor.next == 0p );
    -                                                assert( sl.anchor.ts   == -1llu );
    +                                                assert( sl.anchor.ts   == MAX );
                                                     assert( mock_head(sl)  == sl.prev );
                                             } else {
                                                     assert( sl.anchor.next != 0p );
    -                                                assert( sl.anchor.ts   != -1llu );
    +                                                assert( sl.anchor.ts   != MAX );
                                                     assert( mock_head(sl)  != sl.prev );
                                             }
    ...
             /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
             it->rdq.id = valrq;
    -        it->rdq.target = MAX;
    +        it->rdq.target = UINT_MAX;
             valrq += __shard_factor.readyq;
             #if defined(CFA_HAVE_LINUX_IO_URING_H)
                     it->io.ctx->cq.id = valio;
    -                it->io.target = MAX;
    +                it->io.target = UINT_MAX;
                     valio += __shard_factor.io;
             #endif
    ...
             this.prev = mock_head(this);
             this.anchor.next = 0p;
    -        this.anchor.ts   = -1llu;
    +        this.anchor.ts   = MAX;
             #if !defined(__CFA_NO_STATISTICS__)
                     this.cnt  = 0;
    ...
             /* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
             /* paranoid */ verify( mock_head(this)->link.next == 0p );
    -        /* paranoid */ verify( mock_head(this)->link.ts   == -1llu );
    +        /* paranoid */ verify( mock_head(this)->link.ts   == MAX );
             /* paranoid */ verify( mock_head(this) == this.prev );
             /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
    ...
             // Make sure the list is empty
             /* paranoid */ verify( this.anchor.next == 0p );
    -        /* paranoid */ verify( this.anchor.ts   == -1llu );
    +        /* paranoid */ verify( this.anchor.ts   == MAX );
             /* paranoid */ verify( mock_head(this)  == this.prev );
     }
  • libcfa/src/concurrency/kernel/cluster.hfa

    rbf0263c → r90a8125

     #include "kernel/private.hfa"
    -#include "limits.hfa"
    +#include <limits.h>
     //-----------------------------------------------------------------------
    ...
     static inline void touch_tsc(__timestamp_t * tscs, size_t idx, unsigned long long ts_prev, unsigned long long ts_next) {
    -        if (ts_next == MAX) return;
    +        if (ts_next == ULLONG_MAX) return;
             unsigned long long now = rdtscl();
             unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED);
    ...
             for(i; shard_factor) {
                     unsigned long long ptsc = ts(data[start + i]);
    -                if(ptsc != -1ull) {
    +                if(ptsc != ULLONG_MAX) {
                             /* paranoid */ verify( start + i < count );
                             unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].ma);
  • libcfa/src/concurrency/kernel/private.hfa

    rbf0263c → r90a8125

     //-----------------------------------------------------------------------------
     // Processor
    -void main(processorCtx_t *);
    +void main(processorCtx_t &);
    +static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
     void * __create_pthread( pthread_t *, void * (*)(void *), void * );
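
Together with the kernel.hfa hunk above, this change hand-expands what the coroutine keyword used to generate for processorCtx_t: the type becomes a plain struct embedding a coroutine$ descriptor, and get_coroutine plus a by-reference main are supplied explicitly. A rough sketch of the equivalence, under the assumption that the keyword's main effect here is adding the descriptor field and the accessor:

        // Keyword form (before this changeset):
        //     coroutine processorCtx_t { struct processor * proc; };
        //
        // Hand-expanded form (after this changeset):
        struct processorCtx_t {
                struct coroutine$ self;          // embedded coroutine descriptor
                struct processor * proc;
        };
        void main(processorCtx_t &);             // coroutine body, now taking a reference
        static inline coroutine$ * get_coroutine(processorCtx_t & this) { return &this.self; }

The kernel/startup.cfa hunks below follow suit, renaming the constructor accesses from this.__cor to this.self.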
  • libcfa/src/concurrency/kernel/startup.cfa

    rbf0263c → r90a8125

     #endif
    -cluster              * mainCluster;
    +cluster              * mainCluster libcfa_public;
     processor            * mainProcessor;
     thread$              * mainThread;
    ...
     };
    -void ?{}( current_stack_info_t & this ) {
    +static void ?{}( current_stack_info_t & this ) {
             __stack_context_t ctx;
             CtxGet( ctx );
    ...
             // Construct the processor context of the main processor
             void ?{}(processorCtx_t & this, processor * proc) {
    -                (this.__cor){ "Processor" };
    -                this.__cor.starter = 0p;
    +                (this.self){ "Processor" };
    +                this.self.starter = 0p;
                     this.proc = proc;
             }
    ...
             self_mon_p = &self_mon;
             link.next = 0p;
    -        link.ts   = -1llu;
    +        link.ts   = MAX;
             preferred = ready_queue_new_preferred();
             last_proc = 0p;
    ...
     // Construct the processor context of non-main processors
     static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
    -        (this.__cor){ info };
    +        (this.self){ info };
             this.proc = proc;
     }
    ...
     }
    -void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
    +void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
             ( this.terminated ){};
             ( this.runner ){};
    ...
     }
    -void ?{}(processor & this, const char name[], cluster & _cltr) {
    +void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
             (this){name, _cltr, 0p};
     }
     extern size_t __page_size;
    -void ^?{}(processor & this) with( this ){
    +void ^?{}(processor & this) libcfa_public with( this ) {
             /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
             __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
    ...
     }
    -void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
    +void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
             this.name = name;
             this.preemption_rate = preemption_rate;
    ...
     }
    -void ^?{}(cluster & this) {
    +void ^?{}(cluster & this) libcfa_public {
             destroy(this.io.arbiter);
  • libcfa/src/concurrency/locks.cfa

    rbf0263c → r90a8125

     #include <stdlib.hfa>
    +#pragma GCC visibility push(default)
     //-----------------------------------------------------------------------------
     // info_thread
    ...
     }
    -void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
    +static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
             thread$ * t = &try_pop_front( blocked_threads );
             owner = t;
    ...
             void ^?{}( alarm_node_wrap(L) & this ) { }
    -        void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
    +        static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
                     // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
                     lock( cond->lock __cfaabi_dbg_ctx2 );
    ...
             // this casts the alarm node to our wrapped type since we used type erasure
    -        void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
    +        static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
     }
    ...
             void ^?{}( condition_variable(L) & this ){ }
    -        void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
    +        static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
                     if(&popped != 0p) {
                             popped.signalled = true;
    ...
             int counter( condition_variable(L) & this ) with(this) { return count; }
    -        size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
    +        static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
                     // add info_thread to waiting queue
                     insert_last( blocked_threads, *i );
    ...
             // helper for wait()'s' with no timeout
    -        void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
    +        static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
                     lock( lock __cfaabi_dbg_ctx2 );
                     size_t recursion_count = queue_and_get_recursion(this, &i);
    ...
             // helper for wait()'s' with a timeout
    -        void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
    +        static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
                     lock( lock __cfaabi_dbg_ctx2 );
                     size_t recursion_count = queue_and_get_recursion(this, &info);
    ...
             // fast_cond_var
             void  ?{}( fast_cond_var(L) & this ){
    -                this.blocked_threads{}; 
    +                this.blocked_threads{};
                     #ifdef __CFA_DEBUG__
                     this.lock_used = 0p;
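
locks.cfa combines the two symbol-trimming techniques used throughout this merge: a single #pragma GCC visibility push(default) keeps the public locking API exported when the library is otherwise built with hidden visibility, while file-local helpers (pop_and_set_new_owner, timeout_handler, queue_info_thread, ...) become static so they are not exported at all. A small stand-alone illustration of the pragma's effect, with hypothetical names, in plain C:

        /* demo.c -- compile with:  gcc -fvisibility=hidden -fPIC -shared demo.c -o libdemo.so
         * inspect exports with:    nm -D --defined-only libdemo.so                            */

        static void helper_local(void) {}          /* internal linkage: never exported         */

        #pragma GCC visibility push(default)       /* declarations below override the hidden
                                                      default set on the command line          */
        void api_lock(void)   { helper_local(); }  /* exported                                 */
        void api_unlock(void) {}                   /* exported                                 */
        #pragma GCC visibility pop                 /* restore hidden default for what follows  */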
  • libcfa/src/concurrency/monitor.cfa

    rbf0263c → r90a8125

     static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
    +static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
    +static inline void ?{}(__condition_criterion_t & this );
    +static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
     static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
     static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
    ...
     // Leave single monitor
    -void __leave( monitor$ * this ) {
    +static void __leave( monitor$ * this ) {
             // Lock the monitor spinlock
             lock( this->lock __cfaabi_dbg_ctx2 );
    ...
     // Leave single monitor for the last time
    -void __dtor_leave( monitor$ * this, bool join ) {
    +static void __dtor_leave( monitor$ * this, bool join ) {
             __cfaabi_dbg_debug_do(
                     if( active_thread() != this->owner ) {
    ...
     // Ctor for monitor guard
     // Sorts monitors before entering
    -void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
    +void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
             thread$ * thrd = active_thread();
    ...
     }
    -void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
    +void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
             this{ m, count, 0p };
     }
    ...
     // Dtor for monitor guard
    -void ^?{}( monitor_guard_t & this ) {
    +void ^?{}( monitor_guard_t & this ) libcfa_public {
             // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
    ...
     // Ctor for monitor guard
     // Sorts monitors before entering
    -void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
    +void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
             // optimization
             thread$ * thrd = active_thread();
    ...
     // Dtor for monitor guard
    -void ^?{}( monitor_dtor_guard_t & this ) {
    +void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
             // Leave the monitors in order
             __dtor_leave( this.m, this.join );
    ...
     //-----------------------------------------------------------------------------
     // Internal scheduling types
    -void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    +static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
             this.waiting_thread = waiting_thread;
             this.count = count;
    ...
     }
    -void ?{}(__condition_criterion_t & this ) with( this ) {
    +static void ?{}(__condition_criterion_t & this ) with( this ) {
             ready  = false;
             target = 0p;
    ...
     }
    -void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
    +static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
             this.ready  = false;
             this.target = target;
    ...
     //-----------------------------------------------------------------------------
     // Internal scheduling
    -void wait( condition & this, uintptr_t user_info = 0 ) {
    +void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
             brand_condition( this );
    ...
     }
    -bool signal( condition & this ) {
    +bool signal( condition & this ) libcfa_public {
             if( is_empty( this ) ) { return false; }
    ...
     }
    -bool signal_block( condition & this ) {
    +bool signal_block( condition & this ) libcfa_public {
             if( !this.blocked.head ) { return false; }
    ...
     // Access the user_info of the thread waiting at the front of the queue
    -uintptr_t front( condition & this ) {
    +uintptr_t front( condition & this ) libcfa_public {
             verifyf( !is_empty(this),
                     "Attempt to access user data on an empty condition.\n"
    ...
     //              setup mask
     //              block
    -void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
    +void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
             // This statment doesn't have a contiguous list of monitors...
             // Create one!
    ...
     // Can't be accepted since a mutex stmt is effectively an anonymous routine
     // Thus we do not need a monitor group
    -void lock( monitor$ * this ) {
    +void lock( monitor$ * this ) libcfa_public {
             thread$ * thrd = active_thread();
    ...
     // Leave routine for mutex stmt
     // Is just a wrapper around __leave for the is_lock trait to see
    -void unlock( monitor$ * this ) { __leave( this ); }
    +void unlock( monitor$ * this ) libcfa_public { __leave( this ); }
     // Local Variables: //
  • libcfa/src/concurrency/monitor.hfa

    rbf0263c → r90a8125

     }
    -void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
    -void ?{}(__condition_criterion_t & this );
    -void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
    +// void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
    +// void ?{}(__condition_criterion_t & this );
    +// void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
     struct condition {
  • libcfa/src/concurrency/preemption.cfa

    rbf0263c → r90a8125

     #endif
    -__attribute__((weak)) Duration default_preemption() {
    +__attribute__((weak)) Duration default_preemption() libcfa_public {
             const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
             if(!preempt_rate_s) {
    ...
     //----------
     // special case for preemption since used often
    -__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
    +__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
             // create a assembler label before
             // marked as clobber all to avoid movement
    ...
     // Get data from the TLS block
     // struct asm_region __cfaasm_get;
    -uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
    +uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
     uintptr_t __cfatls_get( unsigned long int offset ) {
             // create a assembler label before
    ...
     extern "C" {
             // Disable interrupts by incrementing the counter
    -        void disable_interrupts() {
    +        __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
                     // create a assembler label before
                     // marked as clobber all to avoid movement
    ...
             // Enable interrupts by decrementing the counter
             // If counter reaches 0, execute any pending __cfactx_switch
    -        void enable_interrupts( bool poll ) {
    +        void enable_interrupts( bool poll ) libcfa_public {
                     // Cache the processor now since interrupts can start happening after the atomic store
                     processor   * proc = __cfaabi_tls.this_processor;
    ...
     //-----------------------------------------------------------------------------
     // Kernel Signal Debug
    -void __cfaabi_check_preemption() {
    +void __cfaabi_check_preemption() libcfa_public {
             bool ready = __preemption_enabled();
             if(!ready) { abort("Preemption should be ready"); }
  • libcfa/src/concurrency/ready_queue.cfa

    rbf0263c → r90a8125

             const unsigned long long ctsc = rdtscl();
    -        if(proc->rdq.target == MAX) {
    +        if(proc->rdq.target == UINT_MAX) {
                     uint64_t chaos = __tls_rand();
                     unsigned ext = chaos & 0xff;
    ...
                     const unsigned target = proc->rdq.target;
                     __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
    -                /* paranoid */ verify( readyQ.tscs[target].tv != MAX );
    +                /* paranoid */ verify( readyQ.tscs[target].tv != ULLONG_MAX );
                     if(target < lanes_count) {
                             const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
    ...
                             }
                     }
    -                proc->rdq.target = MAX;
    +                proc->rdq.target = UINT_MAX;
             }
    ...
     // get preferred ready for new thread
     unsigned ready_queue_new_preferred() {
    -        unsigned pref = MAX;
    +        unsigned pref = UINT_MAX;
             if(struct thread$ * thrd = publicTLS_get( this_thread )) {
                     pref = thrd->preferred;
  • libcfa/src/concurrency/ready_subqueue.hfa

    rbf0263c → r90a8125

             /* paranoid */ verify( this.lock );
             /* paranoid */ verify( node->link.next == 0p );
    -        /* paranoid */ verify( node->link.ts  == MAX  );
    +        /* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX  );
             /* paranoid */ verify( this.prev->link.next == 0p );
    -        /* paranoid */ verify( this.prev->link.ts   == MAX  );
    +        /* paranoid */ verify( __atomic_load_n(&this.prev->link.ts, __ATOMIC_RELAXED)   == MAX  );
             if( this.anchor.next == 0p ) {
                     /* paranoid */ verify( this.anchor.next == 0p );
    -                /* paranoid */ verify( this.anchor.ts  == MAX );
    -                /* paranoid */ verify( this.anchor.ts  != 0  );
    +                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) == MAX );
    +                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0  );
                     /* paranoid */ verify( this.prev == mock_head( this ) );
             } else {
                     /* paranoid */ verify( this.anchor.next != 0p );
    -                /* paranoid */ verify( this.anchor.ts  != MAX );
    -                /* paranoid */ verify( this.anchor.ts  != 0  );
    +                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
    +                /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0  );
                     /* paranoid */ verify( this.prev != mock_head( this ) );
             }
    ...
             /* paranoid */ verify( this.lock );
             /* paranoid */ verify( this.anchor.next != 0p );
    -        /* paranoid */ verify( this.anchor.ts  != MAX );
    -        /* paranoid */ verify( this.anchor.ts   != 0  );
    +        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
    +        /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0   );
             // Get the relevant nodes locally
             thread$ * node = this.anchor.next;
             this.anchor.next = node->link.next;
    -        this.anchor.ts   = node->link.ts;
    +        __atomic_store_n(&this.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
             bool is_empty = this.anchor.next == 0p;
             node->link.next = 0p;
    -        node->link.ts   = MAX;
    +        __atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
             #if !defined(__CFA_NO_STATISTICS__)
                     this.cnt--;
    ...
             if(is_empty) this.prev = mock_head( this );
    +        unsigned long long ats = __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
             /* paranoid */ verify( node->link.next == 0p );
    -        /* paranoid */ verify( node->link.ts   == MAX );
    -        /* paranoid */ verify( node->link.ts   != 0  );
    -        /* paranoid */ verify( this.anchor.ts  != 0 );
    -        /* paranoid */ verify( (this.anchor.ts == MAX) == is_empty );
    -        return [node, this.anchor.ts];
    +        /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX );
    +        /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) != 0   );
    +        /* paranoid */ verify( ats != 0 );
    +        /* paranoid */ verify( (ats == MAX) == is_empty );
    +        return [node, ats];
     }
    ...
             // Cannot verify 'emptiness' here since it may not be locked
             /* paranoid */ verify(this.anchor.ts != 0);
    -        return this.anchor.ts;
    +        /* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.anchor.ts), &this.anchor.ts));
    +        return __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
     }
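
ready_subqueue.hfa stops touching the link and anchor timestamps with plain loads and stores: as the "may not be locked" comment above notes, the timestamp query can run without holding the subqueue lock, so the field is now read and written through __atomic_load_n/__atomic_store_n with relaxed ordering, plus a check that such accesses are lock-free. A minimal stand-alone sketch of the same pattern (struct and function names are illustrative):

        #include <limits.h>

        struct lane {
                unsigned long long ts;   /* timestamp read by other processors without the lock */
        };

        /* Writer side: runs while holding the subqueue lock. */
        static void mark_empty(struct lane * l) {
                __atomic_store_n(&l->ts, ULLONG_MAX, __ATOMIC_RELAXED);
        }

        /* Reader side: may race with the writer, so use an atomic load; relaxed ordering
         * suffices because the value is only a scheduling heuristic, not a synchronisation flag. */
        static unsigned long long read_ts(const struct lane * l) {
                _Static_assert(__atomic_always_lock_free(sizeof(unsigned long long), 0),
                               "timestamp accesses must be lock-free");
                return __atomic_load_n(&l->ts, __ATOMIC_RELAXED);
        }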
  • libcfa/src/concurrency/thread.cfa

    rbf0263c → r90a8125

     #include "thread.hfa"
    +#include "exception.hfa"
     #include "kernel/private.hfa"
    -#include "exception.hfa"
    +#include "limits.hfa"
     #define __CFA_INVOKE_PRIVATE__
    ...
     extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
    +#pragma GCC visibility push(default)
     //-----------------------------------------------------------------------------
    ...
             curr_cluster = &cl;
             link.next = 0p;
    -        link.ts   = -1llu;
    +        link.ts   = MAX;
             preferred = ready_queue_new_preferred();
             last_proc = 0p;
    ...
     }
    -forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))
    -    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
    +forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
    +    | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
     void ?{}( thread_dtor_guard_t & this,
                     T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
    ...
     //-----------------------------------------------------------------------------
    -forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T))
    -    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
    +forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
    +        | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
     T & join( T & this ) {
             thread_dtor_guard_t guard = { this, defaultResumptionHandler };
  • libcfa/src/concurrency/thread.hfa

    rbf0263c → r90a8125

     };
    -EHM_FORALL_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) (
    +forall(thread_t &)
    +exception ThreadCancelled {
             thread_t * the_thread;
             exception_t * the_exception;
    -);
    +};
     forall(T &)
    ...
     };
    -forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled, (T))
    -    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
    +forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
    +        | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
     void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
     void ^?{}( thread_dtor_guard_t & this );
    ...
     //----------
     // join
    -forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled, (T))
    -    | { EHM_DEFAULT_VTABLE(ThreadCancelled, (T)); })
    +forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
    +        | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
     T & join( T & this );