Changes in / [6a490b2:a7b486b] — changeset diff between revision 6a490b2 (old) and revision a7b486b (new)


Ignore: (no whitespace/property filters applied)
Files changed in this changeset:
1 deleted
13 edited

Legend (diff line markers below):

Unmodified — line is identical in both revisions
Added — line exists only in the newer revision (ra7b486b)
Removed — line exists only in the older revision (r6a490b2)
  • libcfa/src/Makefile.am

    r6a490b2 ra7b486b  
    4848thread_headers_nosrc = concurrency/invoke.h
    4949thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa concurrency/monitor.hfa concurrency/mutex.hfa
    50 thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}
     50thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa ${thread_headers:.hfa=.cfa}
    5151else
    5252headers =
  • libcfa/src/Makefile.in

    r6a490b2 ra7b486b  
    166166        concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa \
    167167        concurrency/invoke.c concurrency/io.cfa \
    168         concurrency/preemption.cfa concurrency/ready_queue.cfa \
    169         concurrency/coroutine.cfa concurrency/thread.cfa \
    170         concurrency/kernel.cfa concurrency/monitor.cfa \
    171         concurrency/mutex.cfa
     168        concurrency/preemption.cfa concurrency/coroutine.cfa \
     169        concurrency/thread.cfa concurrency/kernel.cfa \
     170        concurrency/monitor.cfa concurrency/mutex.cfa
    172171@BUILDLIB_TRUE@am__objects_3 = concurrency/coroutine.lo \
    173172@BUILDLIB_TRUE@ concurrency/thread.lo concurrency/kernel.lo \
     
    177176@BUILDLIB_TRUE@ concurrency/alarm.lo concurrency/invoke.lo \
    178177@BUILDLIB_TRUE@ concurrency/io.lo concurrency/preemption.lo \
    179 @BUILDLIB_TRUE@ concurrency/ready_queue.lo $(am__objects_3)
     178@BUILDLIB_TRUE@ $(am__objects_3)
    180179am_libcfathread_la_OBJECTS = $(am__objects_4)
    181180libcfathread_la_OBJECTS = $(am_libcfathread_la_OBJECTS)
     
    477476@BUILDLIB_FALSE@thread_headers =
    478477@BUILDLIB_TRUE@thread_headers = concurrency/coroutine.hfa concurrency/thread.hfa concurrency/kernel.hfa concurrency/monitor.hfa concurrency/mutex.hfa
    479 @BUILDLIB_TRUE@thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa concurrency/ready_queue.cfa ${thread_headers:.hfa=.cfa}
     478@BUILDLIB_TRUE@thread_libsrc = concurrency/CtxSwitch-@ARCHITECTURE@.S concurrency/alarm.cfa concurrency/invoke.c concurrency/io.cfa concurrency/preemption.cfa ${thread_headers:.hfa=.cfa}
    480479
    481480#----------------------------------------------------------------------------------------------------------------
     
    615614        concurrency/$(DEPDIR)/$(am__dirstamp)
    616615concurrency/preemption.lo: concurrency/$(am__dirstamp) \
    617         concurrency/$(DEPDIR)/$(am__dirstamp)
    618 concurrency/ready_queue.lo: concurrency/$(am__dirstamp) \
    619616        concurrency/$(DEPDIR)/$(am__dirstamp)
    620617concurrency/coroutine.lo: concurrency/$(am__dirstamp) \
  • libcfa/src/bits/defs.hfa

    r6a490b2 ra7b486b  
    5454    return ( (unsigned long long)lo)|( ((unsigned long long)hi)<<32 );
    5555}
    56 
    57 // #define __CFA_NO_BIT_TEST_AND_SET__
    58 
    59 #if defined( __i386 )
    60 static inline bool __atomic_bts(volatile unsigned long int * target, unsigned long int bit ) {
    61         #if defined(__CFA_NO_BIT_TEST_AND_SET__)
    62         unsigned long int mask = 1ul << bit;
    63         unsigned long int ret = __atomic_fetch_or(target, mask, (int)__ATOMIC_RELAXED);
    64         return (ret & mask) != 0;
    65     #else
    66         int result = 0;
    67         asm volatile(
    68             "LOCK btsl %[bit], %[target]\n\t"
    69             : "=@ccc" (result)
    70             : [target] "m" (*target), [bit] "r" (bit)
    71         );
    72         return result != 0;
    73     #endif
    74 }
    75 
    76 static inline bool __atomic_btr(volatile unsigned long int * target, unsigned long int bit ) {
    77         #if defined(__CFA_NO_BIT_TEST_AND_SET__)
    78         unsigned long int mask = 1ul << bit;
    79         unsigned long int ret = __atomic_fetch_and(target, ~mask, (int)__ATOMIC_RELAXED);
    80         return (ret & mask) != 0;
    81         #else
    82         int result = 0;
    83         asm volatile(
    84             "LOCK btrl %[bit], %[target]\n\t"
    85             :"=@ccc" (result)
    86             : [target] "m" (*target), [bit] "r" (bit)
    87         );
    88         return result != 0;
    89     #endif
    90 }
    91 #elif defined( __x86_64 )
    92 static inline bool __atomic_bts(volatile unsigned long long int * target, unsigned long long int bit ) {
    93         #if defined(__CFA_NO_BIT_TEST_AND_SET__)
    94         unsigned long long int mask = 1ul << bit;
    95         unsigned long long int ret = __atomic_fetch_or(target, mask, (int)__ATOMIC_RELAXED);
    96         return (ret & mask) != 0;
    97     #else
    98         int result = 0;
    99         asm volatile(
    100             "LOCK btsq %[bit], %[target]\n\t"
    101             : "=@ccc" (result)
    102             : [target] "m" (*target), [bit] "r" (bit)
    103         );
    104         return result != 0;
    105     #endif
    106 }
    107 
    108 static inline bool __atomic_btr(volatile unsigned long long int * target, unsigned long long int bit ) {
    109         #if defined(__CFA_NO_BIT_TEST_AND_SET__)
    110         unsigned long long int mask = 1ul << bit;
    111         unsigned long long int ret = __atomic_fetch_and(target, ~mask, (int)__ATOMIC_RELAXED);
    112         return (ret & mask) != 0;
    113         #else
    114         int result = 0;
    115         asm volatile(
    116             "LOCK btrq %[bit], %[target]\n\t"
    117             :"=@ccc" (result)
    118             : [target] "m" (*target), [bit] "r" (bit)
    119         );
    120         return result != 0;
    121     #endif
    122 }
    123 #elif defined( __ARM_ARCH )
    124     #error __atomic_bts and __atomic_btr not implemented for arm
    125 #else
    126         #error uknown hardware architecture
    127 #endif
  • libcfa/src/concurrency/invoke.h

    r6a490b2 ra7b486b  
    161161        };
    162162
    163         // Link lists fields
    164         // instrusive link field for threads
    165         struct __thread_desc_link {
    166                 struct $thread * next;
    167                 struct $thread * prev;
    168                 unsigned long long ts;
    169         };
    170 
    171163        struct $thread {
    172164                // Core threading fields
     
    200192                // Link lists fields
    201193                // instrusive link field for threads
    202                 struct __thread_desc_link link;
     194                struct $thread * next;
    203195
    204196                struct {
     
    226218        #ifdef __cforall
    227219        extern "Cforall" {
    228 
    229220                static inline $thread *& get_next( $thread & this ) __attribute__((const)) {
    230                         return this.link.next;
     221                        return this.next;
    231222                }
    232223
  • libcfa/src/concurrency/kernel.cfa

    r6a490b2 ra7b486b  
    197197        self_mon.recursion = 1;
    198198        self_mon_p = &self_mon;
    199         link.next = 0p;
    200         link.prev = 0p;
     199        next = 0p;
    201200
    202201        node.next = 0p;
     
    224223        this.name = name;
    225224        this.cltr = &cltr;
    226         id = -1u;
    227225        terminated{ 0 };
    228226        destroyer = 0p;
     
    262260        this.preemption_rate = preemption_rate;
    263261        ready_queue{};
    264         ready_lock{};
     262        ready_queue_lock{};
    265263
    266264        #if !defined(__CFA_NO_STATISTICS__)
     
    297295        __cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
    298296
    299         // register the processor unless it's the main thread which is handled in the boot sequence
    300         if(this != mainProcessor) {
    301                 this->id = doregister(this->cltr, this);
    302                 ready_queue_grow( this->cltr );
    303         }
    304 
     297        doregister(this->cltr, this);
    305298
    306299        {
     
    337330        }
    338331
     332        unregister(this->cltr, this);
     333
    339334        V( this->terminated );
    340335
    341         // unregister the processor unless it's the main thread which is handled in the boot sequence
    342         if(this != mainProcessor) {
    343                 ready_queue_shrink( this->cltr );
    344                 unregister(this->cltr, this);
    345         }
    346         else {
    347                 // HACK : the coroutine context switch expects this_thread to be set
    348                 // and it make sense for it to be set in all other cases except here
    349                 // fake it
    350                 kernelTLS.this_thread = mainThread;
    351         }
    352 
    353336        __cfadbg_print_safe(runtime_core, "Kernel : core %p terminated\n", this);
    354337
    355         stats_tls_tally(this->cltr);
     338        // HACK : the coroutine context switch expects this_thread to be set
     339        // and it make sense for it to be set in all other cases except here
     340        // fake it
     341        if( this == mainProcessor ) kernelTLS.this_thread = mainThread;
    356342}
    357343
     
    605591// Scheduler routines
    606592// KERNEL ONLY
    607 void __schedule_thread( $thread * thrd ) {
    608         /* paranoid */ verify( thrd );
    609         /* paranoid */ verify( thrd->state != Halted );
     593void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) {
    610594        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    611595        /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
     
    615599                          "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
    616600        /* paranoid */ #endif
    617         /* paranoid */ verifyf( thrd->link.next == 0p, "Expected null got %p", thrd->link.next );
     601        /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
    618602
    619603        if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;
    620604
    621         ready_schedule_lock(thrd->curr_cluster, kernelTLS.this_processor);
    622                 bool was_empty = push( thrd->curr_cluster, thrd );
    623         ready_schedule_unlock(thrd->curr_cluster, kernelTLS.this_processor);
     605        lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
     606        bool was_empty = !(ready_queue != 0);
     607        append( ready_queue, thrd );
     608        unlock( ready_queue_lock );
    624609
    625610        __wake_one(thrd->curr_cluster, was_empty);
     
    632617        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
    633618
    634         ready_schedule_lock(this, kernelTLS.this_processor);
    635                 $thread * head = pop( this );
    636         ready_schedule_unlock(this, kernelTLS.this_processor);
     619        lock( ready_queue_lock __cfaabi_dbg_ctx2 );
     620        $thread * head = pop_head( ready_queue );
     621        unlock( ready_queue_lock );
    637622
    638623        /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
     
    778763                pending_preemption = false;
    779764                kernel_thread = pthread_self();
    780                 id = -1u;
    781765
    782766                runner{ &this };
     
    788772        mainProcessor = (processor *)&storage_mainProcessor;
    789773        (*mainProcessor){};
    790 
    791         mainProcessor->id = doregister(mainCluster, mainProcessor);
    792774
    793775        //initialize the global state variables
     
    844826        kernel_stop_preemption();
    845827
    846         unregister(mainCluster, mainProcessor);
    847 
    848828        // Destroy the main processor and its context in reverse order of construction
    849829        // These were manually constructed so we need manually destroy them
    850830        void ^?{}(processor & this) with( this ){
    851831                /* paranoid */ verify( this.do_terminate == true );
    852                 __cfaabi_dbg_print_safe("Kernel : destroyed main processor context %p\n", &runner);
    853832        }
    854833
     
    856835
    857836        // Final step, destroy the main thread since it is no longer needed
    858 
    859837        // Since we provided a stack to this taxk it will not destroy anything
    860838        /* paranoid */ verify(mainThread->self_cor.stack.storage == (__stack_t*)(((uintptr_t)&storage_mainThreadCtx)| 0x1));
     
    10991077        cltr->nthreads -= 1;
    11001078        unlock(cltr->thread_list_lock);
     1079}
     1080
     1081void doregister( cluster * cltr, processor * proc ) {
     1082        lock      (cltr->idle_lock __cfaabi_dbg_ctx2);
     1083        cltr->nprocessors += 1;
     1084        push_front(cltr->procs, *proc);
     1085        unlock    (cltr->idle_lock);
     1086}
     1087
     1088void unregister( cluster * cltr, processor * proc ) {
     1089        lock  (cltr->idle_lock __cfaabi_dbg_ctx2);
     1090        remove(cltr->procs, *proc );
     1091        cltr->nprocessors -= 1;
     1092        unlock(cltr->idle_lock);
    11011093}
    11021094
  • libcfa/src/concurrency/kernel.hfa

    r6a490b2 ra7b486b  
    6060        // Cluster from which to get threads
    6161        struct cluster * cltr;
    62         unsigned int id;
    6362
    6463        // Name of the processor
     
    120119// #define CFA_CLUSTER_IO_POLLER_KERNEL_SIDE 1 << 1
    121120
    122 
    123 //-----------------------------------------------------------------------------
    124 // Cluster Tools
    125 
    126 // Cells use by the reader writer lock
    127 // while not generic it only relies on a opaque pointer
    128 struct __processor_id;
    129 
    130 // Reader-Writer lock protecting the ready-queue
    131 // while this lock is mostly generic some aspects
    132 // have been hard-coded to for the ready-queue for
    133 // simplicity and performance
    134 struct __clusterRWLock_t {
    135         // total cachelines allocated
    136         unsigned int max;
    137 
    138         // cachelines currently in use
    139         volatile unsigned int alloc;
    140 
    141         // cachelines ready to itereate over
    142         // (!= to alloc when thread is in second half of doregister)
    143         volatile unsigned int ready;
    144 
    145         // writer lock
    146         volatile bool lock;
    147 
    148         // data pointer
    149         __processor_id * data;
    150 };
    151 
    152 void  ?{}(__clusterRWLock_t & this);
    153 void ^?{}(__clusterRWLock_t & this);
    154 
    155 // Intrusives lanes which are used by the relaxed ready queue
    156 struct __attribute__((aligned(128))) __intrusive_lane_t {
    157         // spin lock protecting the queue
    158         volatile bool lock;
    159 
    160         // anchor for the head and the tail of the queue
    161         struct __sentinel_t {
    162                 // Link lists fields
    163                 // instrusive link field for threads
    164                 // must be exactly as in thread_desc
    165                 __thread_desc_link link;
    166         } before, after;
    167 
    168 #if defined(__CFA_WITH_VERIFY__)
    169         // id of last processor to acquire the lock
    170         // needed only to check for mutual exclusion violations
    171         unsigned int last_id;
    172 
    173         // number of items on this list
    174         // needed only to check for deadlocks
    175         unsigned int count;
    176 #endif
    177 
    178         // Optional statistic counters
    179         #if !defined(__CFA_NO_SCHED_STATS__)
    180                 struct __attribute__((aligned(64))) {
    181                         // difference between number of push and pops
    182                         ssize_t diff;
    183 
    184                         // total number of pushes and pops
    185                         size_t  push;
    186                         size_t  pop ;
    187                 } stat;
    188         #endif
    189 };
    190 
    191 void  ?{}(__intrusive_lane_t & this);
    192 void ^?{}(__intrusive_lane_t & this);
    193 
    194 typedef unsigned long long __cfa_readyQ_mask_t;
    195 
    196 // enum {
    197 //      __cfa_ready_queue_mask_size = (64 - sizeof(size_t)) / sizeof(size_t),
    198 //      __cfa_max_ready_queues = __cfa_ready_queue_mask_size * 8 * sizeof(size_t)
    199 // };
    200 
    201 #define __cfa_lane_mask_size ((64 - sizeof(size_t)) / sizeof(__cfa_readyQ_mask_t))
    202 #define __cfa_max_lanes (__cfa_lane_mask_size * 8 * sizeof(__cfa_readyQ_mask_t))
    203 
    204 //TODO adjust cache size to ARCHITECTURE
    205 // Structure holding the relaxed ready queue
    206 struct __attribute__((aligned(128))) __ready_queue_t {
    207         // Data tracking how many/which lanes are used
    208         // Aligned to 128 for cache locality
    209         struct {
    210                 // number of non-empty lanes
    211                 volatile size_t count;
    212 
    213                 // bit mask, set bits indentify which lanes are non-empty
    214                 volatile __cfa_readyQ_mask_t mask[ __cfa_lane_mask_size ];
    215         } used;
    216 
    217         // Data tracking the actual lanes
    218         // On a seperate cacheline from the used struct since
    219         // used can change on each push/pop but this data
    220         // only changes on shrink/grow
    221         struct __attribute__((aligned(64))) {
    222                 // Arary of lanes
    223                 __intrusive_lane_t * volatile data;
    224 
    225                 // Number of lanes (empty or not)
    226                 volatile size_t count;
    227         } lanes;
    228 
    229         // Statistics
    230         #if !defined(__CFA_NO_STATISTICS__)
    231                 __attribute__((aligned(64))) struct {
    232                         struct {
    233                                 // Push statistic
    234                                 struct {
    235                                         // number of attemps at pushing something
    236                                         volatile size_t attempt;
    237 
    238                                         // number of successes at pushing
    239                                         volatile size_t success;
    240                                 } push;
    241 
    242                                 // Pop statistic
    243                                 struct {
    244                                         // number of reads of the mask
    245                                         // picking an empty __cfa_readyQ_mask_t counts here
    246                                         // but not as an attempt
    247                                         volatile size_t maskrds;
    248 
    249                                         // number of attemps at poping something
    250                                         volatile size_t attempt;
    251 
    252                                         // number of successes at poping
    253                                         volatile size_t success;
    254                                 } pop;
    255                         } pick;
    256 
    257                         // stats on the "used" struct of the queue
    258                         // tracks average number of queues that are not empty
    259                         // when pushing / poping
    260                         struct {
    261                                 volatile size_t value;
    262                                 volatile size_t count;
    263                         } used;
    264                 } global_stats;
    265 
    266         #endif
    267 };
    268 
    269 void  ?{}(__ready_queue_t & this);
    270 void ^?{}(__ready_queue_t & this);
    271 
    272121//-----------------------------------------------------------------------------
    273122// Cluster
    274123struct cluster {
    275124        // Ready queue locks
    276         __clusterRWLock_t ready_lock;
     125        __spinlock_t ready_queue_lock;
    277126
    278127        // Ready queue for threads
    279         __ready_queue_t ready_queue;
     128        __queue_t($thread) ready_queue;
    280129
    281130        // Name of the cluster
     
    286135
    287136        // List of processors
    288         __spinlock_t proc_list_lock;
     137        __spinlock_t idle_lock;
     138        __dllist_t(struct processor) procs;
    289139        __dllist_t(struct processor) idles;
     140        unsigned int nprocessors;
    290141
    291142        // List of threads
  • libcfa/src/concurrency/kernel_private.hfa

    r6a490b2 ra7b486b  
    8484//-----------------------------------------------------------------------------
    8585// Utils
    86 #define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
     86#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
    8787
    8888static inline uint32_t __tls_rand() {
     
    100100void unregister( struct cluster * cltr, struct $thread & thrd );
    101101
    102 //=======================================================================
    103 // Cluster lock API
    104 //=======================================================================
    105 struct __attribute__((aligned(64))) __processor_id {
    106         processor * volatile handle;
    107         volatile bool lock;
    108 };
    109 
    110 // Lock-Free registering/unregistering of threads
    111 // Register a processor to a given cluster and get its unique id in return
    112 unsigned doregister( struct cluster * cltr, struct processor * proc );
    113 
    114 // Unregister a processor from a given cluster using its id, getting back the original pointer
    115 void     unregister( struct cluster * cltr, struct processor * proc );
    116 
    117 //=======================================================================
    118 // Reader-writer lock implementation
    119 // Concurrent with doregister/unregister,
    120 //    i.e., threads can be added at any point during or between the entry/exit
    121 
    122 //-----------------------------------------------------------------------
    123 // simple spinlock underlying the RWLock
    124 // Blocking acquire
    125 static inline void __atomic_acquire(volatile bool * ll) {
    126         while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
    127                 while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
    128                         asm volatile("pause");
    129         }
    130         /* paranoid */ verify(*ll);
    131 }
    132 
    133 // Non-Blocking acquire
    134 static inline bool __atomic_try_acquire(volatile bool * ll) {
    135         return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
    136 }
    137 
    138 // Release
    139 static inline void __atomic_unlock(volatile bool * ll) {
    140         /* paranoid */ verify(*ll);
    141         __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
    142 }
    143 
    144 //-----------------------------------------------------------------------
    145 // Reader side : acquire when using the ready queue to schedule but not
    146 //  creating/destroying queues
    147 static inline void ready_schedule_lock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
    148         unsigned iproc = proc->id;
    149         /*paranoid*/ verify(data[iproc].handle == proc);
    150         /*paranoid*/ verify(iproc < ready);
    151 
    152         // Step 1 : make sure no writer are in the middle of the critical section
    153         while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
    154                 asm volatile("pause");
    155 
    156         // Fence needed because we don't want to start trying to acquire the lock
    157         // before we read a false.
    158         // Not needed on x86
    159         // std::atomic_thread_fence(std::memory_order_seq_cst);
    160 
    161         // Step 2 : acquire our local lock
    162         __atomic_acquire( &data[iproc].lock );
    163         /*paranoid*/ verify(data[iproc].lock);
    164 }
    165 
    166 static inline void ready_schedule_unlock( struct cluster * cltr, struct processor * proc) with(cltr->ready_lock) {
    167         unsigned iproc = proc->id;
    168         /*paranoid*/ verify(data[iproc].handle == proc);
    169         /*paranoid*/ verify(iproc < ready);
    170         /*paranoid*/ verify(data[iproc].lock);
    171         __atomic_unlock(&data[iproc].lock);
    172 }
    173 
    174 //-----------------------------------------------------------------------
    175 // Writer side : acquire when changing the ready queue, e.g. adding more
    176 //  queues or removing them.
    177 uint_fast32_t ready_mutate_lock( struct cluster & cltr );
    178 
    179 void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t /* value returned by lock */ );
    180 
    181 //=======================================================================
    182 // Ready-Queue API
    183 //-----------------------------------------------------------------------
    184 // push thread onto a ready queue for a cluster
    185 // returns true if the list was previously empty, false otherwise
    186 __attribute__((hot)) bool push(struct cluster * cltr, struct thread_desc * thrd);
    187 
    188 //-----------------------------------------------------------------------
    189 // pop thread from the ready queue of a cluster
    190 // returns 0p if empty
    191 __attribute__((hot)) thread_desc * pop(struct cluster * cltr);
    192 
    193 //-----------------------------------------------------------------------
    194 // Increase the width of the ready queue (number of lanes) by 4
    195 void ready_queue_grow  (struct cluster * cltr);
    196 
    197 //-----------------------------------------------------------------------
    198 // Decrease the width of the ready queue (number of lanes) by 4
    199 void ready_queue_shrink(struct cluster * cltr);
    200 
    201 //-----------------------------------------------------------------------
    202 // Statics call at the end of each thread to register statistics
    203 #if !defined(__CFA_NO_STATISTICS__)
    204 void stats_tls_tally(struct cluster * cltr);
    205 #else
    206 static inline void stats_tls_tally(struct cluster * cltr) {}
    207 #endif
     102void doregister( struct cluster * cltr, struct processor * proc );
     103void unregister( struct cluster * cltr, struct processor * proc );
    208104
    209105// Local Variables: //
  • libcfa/src/concurrency/monitor.cfa

    r6a490b2 ra7b486b  
    883883        }
    884884
    885         __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? (thread_desc*)node->waiting_thread : (thread_desc*)0p );
     885        __cfaabi_dbg_print_safe( "Kernel :  Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
    886886        return ready2run ? node->waiting_thread : 0p;
    887887}
     
    907907        // For each thread in the entry-queue
    908908        for(    $thread ** thrd_it = &entry_queue.head;
    909                 *thrd_it;
    910                 thrd_it = &(*thrd_it)->link.next
     909                *thrd_it != 1p;
     910                thrd_it = &(*thrd_it)->next
    911911        ) {
    912912                // For each acceptable check if it matches
  • libcfa/src/concurrency/preemption.cfa

    r6a490b2 ra7b486b  
    121121        // If there are still alarms pending, reset the timer
    122122        if( & (*alarms)`first ) {
    123                 __cfadbg_print_buffer_decl(preemption, " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
     123                __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
    124124                Duration delta = (*alarms)`first.alarm - currtime;
    125125                Duration capped = max(delta, 50`us);
  • libcfa/src/concurrency/thread.cfa

    r6a490b2 ra7b486b  
    3535        self_mon_p = &self_mon;
    3636        curr_cluster = &cl;
    37         link.next = 0p;
    38         link.prev = 0p;
     37        next = 0p;
    3938
    4039        node.next = 0p;
  • libcfa/src/stdhdr/assert.h

    r6a490b2 ra7b486b  
    3333        #define verify(x) assert(x)
    3434        #define verifyf(x, ...) assertf(x, __VA_ARGS__)
    35         #define verifyfail(...)
    3635        #define __CFA_WITH_VERIFY__
    3736#else
    3837        #define verify(x)
    3938        #define verifyf(x, ...)
    40         #define verifyfail(...)
    4139#endif
    4240
  • tests/concurrent/examples/datingService.cfa

    r6a490b2 ra7b486b  
    3535                signal_block( Boys[ccode] );                                    // restart boy to set phone number
    3636        } // if
    37         // sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
     37        //sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
    3838        return BoyPhoneNo;
    3939} // DatingService girl
     
    4747                signal_block( Girls[ccode] );                                   // restart girl to set phone number
    4848        } // if
    49         // sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
     49        //sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
    5050        return GirlPhoneNo;
    5151} // DatingService boy
  • tests/concurrent/waitfor/when.cfa

    r6a490b2 ra7b486b  
    5757
    5858void arbiter( global_t & mutex this ) {
    59         // There is a race at start where callers can get in before the arbiter.
    60         // It doesn't really matter here so just restart the loop correctly and move on
    61         this.last_call = 6;
    62 
    6359        for( int i = 0; i < N; i++ ) {
    6460                   when( this.last_call == 6 ) waitfor( call1 : this ) { if( this.last_call != 1) { serr | "Expected last_call to be 1 got" | this.last_call; } }
Note: See TracChangeset for help on using the changeset viewer.