Changes in / [a7b486b:6a490b2]


Ignore:
Files:
1 added
13 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/Makefile.am

    ra7b486b r6a490b2  
    4848thread_headers_nosrc = concurrency/invoke.h
    4949thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa concurrency/monitor.hfa concurrency/mutex.hfa
    50 thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa ${thread_headers:.hfa=.cfa}
     50thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}
    5151else
    5252headers =
  • libcfa/src/Makefile.in

    ra7b486b r6a490b2  
    166166        concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa \
    167167        concurrency/invoke.c concurrency/io.cfa \
    168         concurrency/preemption.cfa concurrency/coroutine.cfa \
    169         concurrency/thread.cfa concurrency/kernel.cfa \
    170         concurrency/monitor.cfa concurrency/mutex.cfa
     168        concurrency/preemption.cfa concurrency/ready_queue.cfa \
     169        concurrency/coroutine.cfa concurrency/thread.cfa \
     170        concurrency/kernel.cfa concurrency/monitor.cfa \
     171        concurrency/mutex.cfa
    171172@BUILDLIB_TRUE@am__objects_3 = concurrency/coroutine.lo \
    172173@BUILDLIB_TRUE@ concurrency/thread.lo concurrency/kernel.lo \
     
    176177@BUILDLIB_TRUE@ concurrency/alarm.lo concurrency/invoke.lo \
    177178@BUILDLIB_TRUE@ concurrency/io.lo concurrency/preemption.lo \
    178 @BUILDLIB_TRUE@ $(am__objects_3)
     179@BUILDLIB_TRUE@ concurrency/ready_queue.lo $(am__objects_3)
    179180am_libcfathread_la_OBJECTS = $(am__objects_4)
    180181libcfathread_la_OBJECTS = $(am_libcfathread_la_OBJECTS)
     
    476477@BUILDLIB_FALSE@thread_headers =
    477478@BUILDLIB_TRUE@thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa concurrency/monitor.hfa concurrency/mutex.hfa
    478 @BUILDLIB_TRUE@thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa ${thread_headers:.hfa=.cfa}
     479@BUILDLIB_TRUE@thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}
    479480
    480481#----------------------------------------------------------------------------------------------------------------
     
    614615        concurrency/$(DEPDIR)/$(am__dirstamp)
    615616concurrency/preemption.lo: concurrency/$(am__dirstamp) \
     617        concurrency/$(DEPDIR)/$(am__dirstamp)
     618concurrency/ready_queue.lo: concurrency/$(am__dirstamp) \
    616619        concurrency/$(DEPDIR)/$(am__dirstamp)
    617620concurrency/coroutine.lo: concurrency/$(am__dirstamp) \
  • libcfa/src/bits/defs.hfa

    ra7b486b r6a490b2  
    5454    return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
    5555}
     56
     57// #define __CFA_NO_BIT_TEST_AND_SET__
     58
     59#if defined( __i386 )
     60static inline bool __atomic_bts(volatile unsigned long int * target, unsigned long int bit ) {
     61        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
     62        unsigned long int mask = 1ul << bit;
     63        unsigned long int ret = __atomic_fetch_or(target, mask, (int)__ATOMIC_RELAXED);
     64        return (ret & mask) != 0;
     65    #else
     66        int result = 0;
     67        asm volatile(
     68            "LOCK btsl %[bit], %[target]\n\t"
     69            : "=@ccc" (result)
     70            : [target] "m" (*target), [bit] "r" (bit)
     71        );
     72        return result != 0;
     73    #endif
     74}
     75
     76static inline bool __atomic_btr(volatile unsigned long int * target, unsigned long int bit ) {
     77        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
     78        unsigned long int mask = 1ul << bit;
     79        unsigned long int ret = __atomic_fetch_and(target, ~mask, (int)__ATOMIC_RELAXED);
     80        return (ret & mask) != 0;
     81        #else
     82        int result = 0;
     83        asm volatile(
     84            "LOCK btrl %[bit], %[target]\n\t"
     85            :"=@ccc" (result)
     86            : [target] "m" (*target), [bit] "r" (bit)
     87        );
     88        return result != 0;
     89    #endif
     90}
     91#elif defined( __x86_64 )
     92static inline bool __atomic_bts(volatile unsigned long long int * target, unsigned long long int bit ) {
     93        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
     94        unsigned long long int mask = 1ul << bit;
     95        unsigned long long int ret = __atomic_fetch_or(target, mask, (int)__ATOMIC_RELAXED);
     96        return (ret & mask) != 0;
     97    #else
     98        int result = 0;
     99        asm volatile(
     100            "LOCK btsq %[bit], %[target]\n\t"
     101            : "=@ccc" (result)
     102            : [target] "m" (*target), [bit] "r" (bit)
     103        );
     104        return result != 0;
     105    #endif
     106}
     107
     108static inline bool __atomic_btr(volatile unsigned long long int * target, unsigned long long int bit ) {
     109        #if defined(__CFA_NO_BIT_TEST_AND_SET__)
     110        unsigned long long int mask = 1ul << bit;
     111        unsigned long long int ret = __atomic_fetch_and(target, ~mask, (int)__ATOMIC_RELAXED);
     112        return (ret & mask) != 0;
     113        #else
     114        int result = 0;
     115        asm volatile(
     116            "LOCK btrq %[bit], %[target]\n\t"
     117            :"=@ccc" (result)
     118            : [target] "m" (*target), [bit] "r" (bit)
     119        );
     120        return result != 0;
     121    #endif
     122}
     123#elif defined( __ARM_ARCH )
     124    #error __atomic_bts and __atomic_btr not implemented for arm
     125#else
     126        #error unknown hardware architecture
     127#endif
  • libcfa/src/concurrency/invoke.h

    ra7b486b r6a490b2  
    161161        };
    162162
     163        // Link lists fields
     164        // intrusive link field for threads
     165        struct __thread_desc_link {
     166                struct $thread * next;
     167                struct $thread * prev;
     168                unsigned long long ts;
     169        };
     170
    163171        struct $thread {
    164172                // Core threading fields
     
    192200                // Link lists fields
    193201                // intrusive link field for threads
    194                 struct $thread * next;
     202                struct __thread_desc_link link;
    195203
    196204                struct {
     
    218226        #ifdef __cforall
    219227        extern "Cforall" {
     228
    220229                static inline $thread *& get_next( $thread & this ) __attribute__((const)) {
    221                         return this.next;
     230                        return this.link.next;
    222231                }
    223232
  • libcfa/src/concurrency/kernel.cfa

    ra7b486b r6a490b2  
    197197        self_mon.recursion = 1;
    198198        self_mon_p = &self_mon;
    199         next = 0p;
     199        link.next = 0p;
     200        link.prev = 0p;
    200201
    201202        node.next = 0p;
     
    223224        this.name = name;
    224225        this.cltr = &cltr;
     226        id = -1u;
    225227        terminated{ 0 };
    226228        destroyer = 0p;
     
    260262        this.preemption_rate = preemption_rate;
    261263        ready_queue{};
    262         ready_queue_lock{};
     264        ready_lock{};
    263265
    264266        #if !defined(__CFA_NO_STATISTICS__)
     
    295297        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
    296298
    297         doregister(this->cltr, this);
     299        // register the processor unless it's the main thread which is handled in the boot sequence
     300        if(this != mainProcessor) {
     301                this->id = doregister(this->cltr, this);
     302                ready_queue_grow( this->cltr );
     303        }
     304
    298305
    299306        {
     
    330337        }
    331338
    332         unregister(this->cltr, this);
    333 
    334339        V( this->terminated );
    335340
     341        // unregister the processor unless it's the main thread which is handled in the boot sequence
     342        if(this != mainProcessor) {
     343                ready_queue_shrink( this->cltr );
     344                unregister(this->cltr, this);
     345        }
     346        else {
     347                // HACK : the coroutine context switch expects this_thread to be set
     348        // and it makes sense for it to be set in all other cases except here
     349                // fake it
     350                kernelTLS.this_thread = mainThread;
     351        }
     352
    336353        __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
    337354
    338         // HACK : the coroutine context switch expects this_thread to be set
    339         // and it make sense for it to be set in all other cases except here
    340         // fake it
    341         if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
     355        stats_tls_tally(this->cltr);
    342356}
    343357
     
    591605// Scheduler routines
    592606// KERNEL ONLY
    593 void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
     607void __schedule_thread( $thread * thrd ) {
     608        /* paranoid */ verify( thrd );
     609        /* paranoid */ verify( thrd->state != Halted );
    594610        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    595611        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
     
    599615                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    600616        /* paranoid */ #endif
    601         /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
     617        /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
    602618
    603619        if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
    604620
    605         lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
    606         bool was_empty = !(ready_queue != 0);
    607         append( ready_queue, thrd );
    608         unlock( ready_queue_lock );
     621        ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor);
     622                bool was_empty = push( thrd->curr_cluster, thrd );
     623        ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor);
    609624
    610625        __wake_one(thrd->curr_cluster, was_empty);
     
    617632        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    618633
    619         lock( ready_queue_lock __cfaabi_dbg_ctx2 );
    620         $thread * head = pop_head( ready_queue );
    621         unlock( ready_queue_lock );
     634        ready_schedule_lock(this, kernelTLS.this_processor);
     635                $thread * head = pop( this );
     636        ready_schedule_unlock(this, kernelTLS.this_processor);
    622637
    623638        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     
    763778                pending_preemption = false;
    764779                kernel_thread = pthread_self();
     780                id = -1u;
    765781
    766782                runner{ &this };
     
    772788        mainProcessor = (processor *)&storage_mainProcessor;
    773789        (*mainProcessor){};
     790
     791        mainProcessor->id = doregister(mainCluster, mainProcessor);
    774792
    775793        //initialize the global state variables
     
    826844        kernel_stop_preemption();
    827845
     846        unregister(mainCluster, mainProcessor);
     847
    828848        // Destroy the main processor and its context in reverse order of construction
    829849        // These were manually constructed so we need manually destroy them
    830850        void ^?{}(processor & this) with( this ){
    831851                /* paranoid */ verify( this.do_terminate == true );
     852                __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
    832853        }
    833854
     
    835856
    836857        // Final step, destroy the main thread since it is no longer needed
     858
    837859        // Since we provided a stack to this task it will not destroy anything
    838860        /* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
     
    10771099        cltr->nthreads -= 1;
    10781100        unlock(cltr->thread_list_lock);
    1079 }
    1080 
    1081 void doregister( cluster * cltr, processor * proc ) {
    1082         lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
    1083         cltr->nprocessors += 1;
    1084         push_front(cltr->procs, *proc);
    1085         unlock    (cltr->idle_lock);
    1086 }
    1087 
    1088 void unregister( cluster * cltr, processor * proc ) {
    1089         lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
    1090         remove(cltr->procs, *proc );
    1091         cltr->nprocessors -= 1;
    1092         unlock(cltr->idle_lock);
    10931101}
    10941102
  • libcfa/src/concurrency/kernel.hfa

    ra7b486b r6a490b2  
    6060        // Cluster from which to get threads
    6161        struct cluster * cltr;
     62        unsigned int id;
    6263
    6364        // Name of the processor
     
    119120// #define CFA_CLUSTER_IO_POLLER_KERNEL_SIDE 1 << 1
    120121
     122
     123//-----------------------------------------------------------------------------
     124// Cluster Tools
     125
     126// Cells use by the reader writer lock
     127// while not generic it only relies on a opaque pointer
     128struct __processor_id;
     129
     130// Reader-Writer lock protecting the ready-queue
     131// while this lock is mostly generic some aspects
     132// have been hard-coded to for the ready-queue for
     133// simplicity and performance
     134struct __clusterRWLock_t {
     135        // total cachelines allocated
     136        unsigned int max;
     137
     138        // cachelines currently in use
     139        volatile unsigned int alloc;
     140
     141        // cachelines ready to iterate over
     142        // (!= to alloc when thread is in second half of doregister)
     143        volatile unsigned int ready;
     144
     145        // writer lock
     146        volatile bool lock;
     147
     148        // data pointer
     149        __processor_id * data;
     150};
     151
     152void  ?{}(__clusterRWLock_t & this);
     153void ^?{}(__clusterRWLock_t & this);
     154
     155// Intrusives lanes which are used by the relaxed ready queue
     156struct __attribute__((aligned(128))) __intrusive_lane_t {
     157        // spin lock protecting the queue
     158        volatile bool lock;
     159
     160        // anchor for the head and the tail of the queue
     161        struct __sentinel_t {
     162                // Link lists fields
     163                // intrusive link field for threads
     164                // must be exactly as in thread_desc
     165                __thread_desc_link link;
     166        } before, after;
     167
     168#if defined(__CFA_WITH_VERIFY__)
     169        // id of last processor to acquire the lock
     170        // needed only to check for mutual exclusion violations
     171        unsigned int last_id;
     172
     173        // number of items on this list
     174        // needed only to check for deadlocks
     175        unsigned int count;
     176#endif
     177
     178        // Optional statistic counters
     179        #if !defined(__CFA_NO_SCHED_STATS__)
     180                struct __attribute__((aligned(64))) {
     181                        // difference between number of push and pops
     182                        ssize_t diff;
     183
     184                        // total number of pushes and pops
     185                        size_t  push;
     186                        size_t  pop ;
     187                } stat;
     188        #endif
     189};
     190
     191void  ?{}(__intrusive_lane_t & this);
     192void ^?{}(__intrusive_lane_t & this);
     193
     194typedef unsigned long long __cfa_readyQ_mask_t;
     195
     196// enum {
     197//      __cfa_ready_queue_mask_size = (64 - sizeof(size_t)) / sizeof(size_t),
     198//      __cfa_max_ready_queues = __cfa_ready_queue_mask_size * 8 * sizeof(size_t)
     199// };
     200
     201#define __cfa_lane_mask_size ((64 - sizeof(size_t)) / sizeof(__cfa_readyQ_mask_t))
     202#define __cfa_max_lanes (__cfa_lane_mask_size * 8 * sizeof(__cfa_readyQ_mask_t))
     203
     204//TODO adjust cache size to ARCHITECTURE
     205// Structure holding the relaxed ready queue
     206struct __attribute__((aligned(128))) __ready_queue_t {
     207        // Data tracking how many/which lanes are used
     208        // Aligned to 128 for cache locality
     209        struct {
     210                // number of non-empty lanes
     211                volatile size_t count;
     212
     213                // bit mask, set bits identify which lanes are non-empty
     214                volatile __cfa_readyQ_mask_t mask[ __cfa_lane_mask_size ];
     215        } used;
     216
     217        // Data tracking the actual lanes
     218        // On a separate cacheline from the used struct since
     219        // used can change on each push/pop but this data
     220        // only changes on shrink/grow
     221        struct __attribute__((aligned(64))) {
     222                // Array of lanes
     223                __intrusive_lane_t * volatile data;
     224
     225                // Number of lanes (empty or not)
     226                volatile size_t count;
     227        } lanes;
     228
     229        // Statistics
     230        #if !defined(__CFA_NO_STATISTICS__)
     231                __attribute__((aligned(64))) struct {
     232                        struct {
     233                                // Push statistic
     234                                struct {
     235                                        // number of attempts at pushing something
     236                                        volatile size_t attempt;
     237
     238                                        // number of successes at pushing
     239                                        volatile size_t success;
     240                                } push;
     241
     242                                // Pop statistic
     243                                struct {
     244                                        // number of reads of the mask
     245                                        // picking an empty __cfa_readyQ_mask_t counts here
     246                                        // but not as an attempt
     247                                        volatile size_t maskrds;
     248
     249                                        // number of attempts at popping something
     250                                        volatile size_t attempt;
     251
     252                                        // number of successes at popping
     253                                        volatile size_t success;
     254                                } pop;
     255                        } pick;
     256
     257                        // stats on the "used" struct of the queue
     258                        // tracks average number of queues that are not empty
     259                        // when pushing / popping
     260                        struct {
     261                                volatile size_t value;
     262                                volatile size_t count;
     263                        } used;
     264                } global_stats;
     265
     266        #endif
     267};
     268
     269void  ?{}(__ready_queue_t & this);
     270void ^?{}(__ready_queue_t & this);
     271
    121272//-----------------------------------------------------------------------------
    122273// Cluster
    123274struct cluster {
    124275        // Ready queue locks
    125         __spinlock_t ready_queue_lock;
     276        __clusterRWLock_t ready_lock;
    126277
    127278        // Ready queue for threads
    128         __queue_t($thread) ready_queue;
     279        __ready_queue_t ready_queue;
    129280
    130281        // Name of the cluster
     
    135286
    136287        // List of processors
    137         __spinlock_t idle_lock;
    138         __dllist_t(struct processor) procs;
     288        __spinlock_t proc_list_lock;
    139289        __dllist_t(struct processor) idles;
    140         unsigned int nprocessors;
    141290
    142291        // List of threads
  • libcfa/src/concurrency/kernel_private.hfa

    ra7b486b r6a490b2  
    8484//-----------------------------------------------------------------------------
    8585// Utils
    86 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
     86#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
    8787
    8888static inline uint32_t __tls_rand() {
     
    100100void unregister( struct cluster * cltr, struct $thread & thrd );
    101101
    102 void doregister( struct cluster * cltr, struct processor * proc );
    103 void unregister( struct cluster * cltr, struct processor * proc );
     102//=======================================================================
     103// Cluster lock API
     104//=======================================================================
     105struct __attribute__((aligned(64))) __processor_id {
     106        processor * volatile handle;
     107        volatile bool lock;
     108};
     109
     110// Lock-Free registering/unregistering of threads
     111// Register a processor to a given cluster and get its unique id in return
     112unsigned doregister( struct cluster * cltr, struct processor * proc );
     113
     114// Unregister a processor from a given cluster using its id, getting back the original pointer
     115void     unregister( struct cluster * cltr, struct processor * proc );
     116
     117//=======================================================================
     118// Reader-writer lock implementation
     119// Concurrent with doregister/unregister,
     120//    i.e., threads can be added at any point during or between the entry/exit
     121
     122//-----------------------------------------------------------------------
     123// simple spinlock underlying the RWLock
     124// Blocking acquire
     125static inline void __atomic_acquire(volatile bool * ll) {
     126        while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
     127                while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
     128                        asm volatile("pause");
     129        }
     130        /* paranoid */ verify(*ll);
     131}
     132
     133// Non-Blocking acquire
     134static inline bool __atomic_try_acquire(volatile bool * ll) {
     135        return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
     136}
     137
     138// Release
     139static inline void __atomic_unlock(volatile bool * ll) {
     140        /* paranoid */ verify(*ll);
     141        __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
     142}
     143
     144//-----------------------------------------------------------------------
     145// Reader side : acquire when using the ready queue to schedule but not
     146//  creating/destroying queues
     147static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
     148        unsigned iproc = proc->id;
     149        /*paranoid*/ verify(data[iproc].handle == proc);
     150        /*paranoid*/ verify(iproc < ready);
     151
     152        // Step 1 : make sure no writer are in the middle of the critical section
     153        while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
     154                asm volatile("pause");
     155
     156        // Fence needed because we don't want to start trying to acquire the lock
     157        // before we read a false.
     158        // Not needed on x86
     159        // std::atomic_thread_fence(std::memory_order_seq_cst);
     160
     161        // Step 2 : acquire our local lock
     162        __atomic_acquire( &data[iproc].lock );
     163        /*paranoid*/ verify(data[iproc].lock);
     164}
     165
     166static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
     167        unsigned iproc = proc->id;
     168        /*paranoid*/ verify(data[iproc].handle == proc);
     169        /*paranoid*/ verify(iproc < ready);
     170        /*paranoid*/ verify(data[iproc].lock);
     171        __atomic_unlock(&data[iproc].lock);
     172}
     173
     174//-----------------------------------------------------------------------
     175// Writer side : acquire when changing the ready queue, e.g. adding more
     176//  queues or removing them.
     177uint_fast32_t ready_mutate_lock( struct cluster & cltr );
     178
     179void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );
     180
     181//=======================================================================
     182// Ready-Queue API
     183//-----------------------------------------------------------------------
     184// push thread onto a ready queue for a cluster
     185// returns true if the list was previously empty, false otherwise
     186__attribute__((hot)) bool push(struct cluster * cltr, struct thread_desc * thrd);
     187
     188//-----------------------------------------------------------------------
     189// pop thread from the ready queue of a cluster
     190// returns 0p if empty
     191__attribute__((hot)) thread_desc * pop(struct cluster * cltr);
     192
     193//-----------------------------------------------------------------------
     194// Increase the width of the ready queue (number of lanes) by 4
     195void ready_queue_grow  (struct cluster * cltr);
     196
     197//-----------------------------------------------------------------------
     198// Decrease the width of the ready queue (number of lanes) by 4
     199void ready_queue_shrink(struct cluster * cltr);
     200
     201//-----------------------------------------------------------------------
     202// Statics call at the end of each thread to register statistics
     203#if !defined(__CFA_NO_STATISTICS__)
     204void stats_tls_tally(struct cluster * cltr);
     205#else
     206static inline void stats_tls_tally(struct cluster * cltr) {}
     207#endif
    104208
    105209// Local Variables: //
  • libcfa/src/concurrency/monitor.cfa

    ra7b486b r6a490b2  
    883883        }
    884884
    885         __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
     885        __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread_desc*)node->waiting_thread : (thread_desc*)0p );
    886886        return ready2run ? node->waiting_thread : 0p;
    887887}
     
    907907        // For each thread in the entry-queue
    908908        for(    $thread ** thrd_it = &entry_queue.head;
    909                 *thrd_it != 1p;
    910                 thrd_it = &(*thrd_it)->next
     909                *thrd_it;
     910                thrd_it = &(*thrd_it)->link.next
    911911        ) {
    912912                // For each acceptable check if it matches
  • libcfa/src/concurrency/preemption.cfa

    ra7b486b r6a490b2  
    121121        // If there are still alarms pending, reset the timer
    122122        if( & (*alarms)`first ) {
    123                 __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
     123                __cfadbg_print_buffer_decl(preemption, " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
    124124                Duration delta = (*alarms)`first.alarm - currtime;
    125125                Duration capped = max(delta, 50`us);
  • libcfa/src/concurrency/thread.cfa

    ra7b486b r6a490b2  
    3535        self_mon_p = &self_mon;
    3636        curr_cluster = &cl;
    37         next = 0p;
     37        link.next = 0p;
     38        link.prev = 0p;
    3839
    3940        node.next = 0p;
  • libcfa/src/stdhdr/assert.h

    ra7b486b r6a490b2  
    3333        #define verify(x) assert(x)
    3434        #define verifyf(x, ...) assertf(x, __VA_ARGS__)
     35        #define verifyfail(...)
    3536        #define __CFA_WITH_VERIFY__
    3637#else
    3738        #define verify(x)
    3839        #define verifyf(x, ...)
     40        #define verifyfail(...)
    3941#endif
    4042
  • tests/concurrent/examples/datingService.cfa

    ra7b486b r6a490b2  
    3535                signal_block( Boys[ccode] );                                    // restart boy to set phone number
    3636        } // if
    37         //sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
     37        // sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
    3838        return BoyPhoneNo;
    3939} // DatingService girl
     
    4747                signal_block( Girls[ccode] );                                   // restart girl to set phone number
    4848        } // if
    49         //sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
     49        // sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
    5050        return GirlPhoneNo;
    5151} // DatingService boy
  • tests/concurrent/waitfor/when.cfa

    ra7b486b r6a490b2  
    5757
    5858void arbiter( global_t & mutex this ) {
     59        // There is a race at start where callers can get in before the arbiter.
     60        // It doesn't really matter here so just restart the loop correctly and move on
     61        this.last_call = 6;
     62
    5963        for( int i = 0; i < N; i++ ) {
    6064                   when( this.last_call == 6 ) waitfor( call1 : this ) { if( this.last_call != 1) { serr | "Expected last_call to be 1 got" | this.last_call; } }
Note: See TracChangeset for help on using the changeset viewer.