//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel/private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Mar  2 16:04:46 2023
// Update Count     : 11
//

#pragma once

#if !defined(__cforall_thread__)
	#error kernel/private.hfa should only be included in libcfathread source
#endif

#include <signal.h>

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

extern "C" {
	#include <sched.h>
}

// #define READYQ_USE_LINEAR_AVG
#define READYQ_USE_LOGDBL_AVG
// #define READYQ_USE_LOGINT_AVG

#if   defined(READYQ_USE_LINEAR_AVG)
typedef unsigned long long __readyQ_avg_t;
#elif defined(READYQ_USE_LOGDBL_AVG)
typedef double __readyQ_avg_t;
#elif defined(READYQ_USE_LOGINT_AVG)
typedef unsigned long long __readyQ_avg_t;
#else
#error must pick a scheme for averaging
#endif
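
// Illustrative sketch (not the actual update rule): with READYQ_USE_LOGDBL_AVG,
// a moving average of ready-queue timestamp ages can be kept on a log2 scale,
// so that large outliers do not dominate, e.g.:
//     __readyQ_avg_t ma_update( __readyQ_avg_t ma, unsigned long long age ) {
//         const double w = 0.5;   // hypothetical smoothing weight
//         return w * ma + (1.0 - w) * log2( 1.0 + (double)age );
//     }
// Judging by the typedefs, READYQ_USE_LINEAR_AVG presumably averages the raw
// ages, and READYQ_USE_LOGINT_AVG uses an integer approximation of the logarithm.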

extern "C" {
	__attribute__((visibility("protected"))) int __cfaabi_pthread_create(pthread_t *_thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
	__attribute__((visibility("protected"))) int __cfaabi_pthread_join(pthread_t _thread, void **retval);
	__attribute__((visibility("protected"))) pthread_t __cfaabi_pthread_self(void);
	__attribute__((visibility("protected"))) int __cfaabi_pthread_attr_init(pthread_attr_t *attr);
	__attribute__((visibility("protected"))) int __cfaabi_pthread_attr_destroy(pthread_attr_t *attr);
	__attribute__((visibility("protected"))) int __cfaabi_pthread_attr_setstack( pthread_attr_t *attr, void *stackaddr, size_t stacksize );
	__attribute__((visibility("protected"))) int __cfaabi_pthread_attr_getstacksize( const pthread_attr_t *attr, size_t *stacksize );
	__attribute__((visibility("protected"))) int __cfaabi_pthread_sigqueue(pthread_t _thread, int sig, const union sigval value);
	__attribute__((visibility("protected"))) int __cfaabi_pthread_sigmask( int how, const sigset_t *set, sigset_t *oset);
}

//-----------------------------------------------------------------------------
// Scheduler
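// A per-processor timestamp and moving average, aligned and padded so each
// instance occupies its own cache line(s) and updates do not cause false
// sharing between processors.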
union __attribute__((aligned(64))) __timestamp_t {
	struct {
		volatile unsigned long long tv;
		volatile __readyQ_avg_t ma;
	} t;
	char __padding[192];
};

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts( bool poll = true );
}

void schedule_thread$( thread$ *, unpark_hint hint ) __attribute__((nonnull (1)));

extern bool __preemption_enabled();

enum {
	PREEMPT_NORMAL    = 0,
	PREEMPT_TERMINATE = 1,
	PREEMPT_IO = 2,
};

static inline void __disable_interrupts_checked() {
	/* paranoid */ verify( __preemption_enabled() );
	disable_interrupts();
	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void __enable_interrupts_checked( bool poll = true ) {
	/* paranoid */ verify( ! __preemption_enabled() );
	enable_interrupts( poll );
	/* paranoid */ verify( __preemption_enabled() );
}
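
// Usage sketch (illustrative): the checked variants bracket a short
// non-preemptible section and let verify() catch unbalanced nesting:
//     __disable_interrupts_checked();
//     // ... touch per-processor state ...
//     __enable_interrupts_checked();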

// Release/wake-up the resources held by the terminating thread
void __thread_finish( thread$ * thrd );

//-----------------------------------------------------------------------------
// Hardware

static inline int __kernel_getcpu() {
	/* paranoid */ verify( ! __preemption_enabled() );
	return sched_getcpu();
}

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t &);
static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
	extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block
#define TICKET_DEAD    (0xDEAD) // thread should never be unparked
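
// Plausible reading of the ticket states (see the park/unpark implementation
// for the authoritative protocol): unparking a still-running thread moves
// RUNNING -> UNBLOCK so its next block is skipped, while unparking a BLOCKED
// thread reschedules it; DEAD marks a thread that must never be unparked.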

//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct thread$ & thrd );
void unregister( struct cluster * cltr, struct thread$ & thrd );

//-----------------------------------------------------------------------------
// I/O
io_arbiter$ * create(void);
void destroy(io_arbiter$ *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
unsigned register_proc_id( void );

// Unregister a processor from a given cluster using its id
void unregister_proc_id( unsigned );
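
// Usage sketch (illustrative): a processor registers once at startup and
// unregisters with the same id at shutdown:
//     unsigned id = register_proc_id();
//     // ... participate as a reader of the cluster data-structures ...
//     unregister_proc_id( id );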

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit
//-----------------------------------------------------------------------
// simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);

	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			Pause();
	}
	/* paranoid */ verify(*ll);
	/* paranoid */ verify( ! __preemption_enabled() );
}
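
// The loop above is a test-and-test-and-set: contenders spin on a relaxed
// load and only retry the atomic exchange once the lock reads free, keeping
// the cache line shared while waiting instead of bouncing it between cores.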

// Non-Blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);

	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify(ll);
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
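
// Usage sketch (illustrative, with preemption already disabled):
//     static volatile bool ll = false;
//     if( __atomic_try_acquire( &ll ) ) {   // non-blocking attempt
//         // ... critical section ...
//         __atomic_unlock( &ll );
//     }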

//-----------------------------------------------------------------------
// Reader-Writer lock protecting the ready-queues.
// While this lock is mostly generic, some aspects
// have been hard-coded for the ready-queue for
// simplicity and performance.
union __attribute__((aligned(64))) __scheduler_RWLock_t {
	struct {
		__attribute__((aligned(64))) char padding;

		// total cachelines allocated
		__attribute__((aligned(64))) unsigned int max;

		// cachelines currently in use
		volatile unsigned int alloc;

		// cachelines ready to iterate over
		// (!= to alloc when thread is in second half of doregister)
		volatile unsigned int ready;

		// writer lock
		volatile bool write_lock;

		// data pointer
		volatile bool * volatile * data;
	} lock;
	char pad[192];
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(__scheduler_lock.lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
		Pause();

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &kernelTLS().sched_lock );
	/* paranoid */ verify(kernelTLS().sched_lock);

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		kernelTLS().in_sched_lock = true;
	#endif
}

static inline void ready_schedule_unlock(void) with(__scheduler_lock.lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
	/* paranoid */ verify( kernelTLS().sched_lock );
	/* paranoid */ verify( kernelTLS().in_sched_lock );
	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		kernelTLS().in_sched_lock = false;
	#endif
	__atomic_unlock(&kernelTLS().sched_lock);
}
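
// Usage sketch (illustrative): reader-side critical section, entered with
// preemption disabled:
//     ready_schedule_lock();
//     // ... schedule through the ready-queues, no queues created/destroyed ...
//     ready_schedule_unlock();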

#ifdef __CFA_WITH_VERIFY__
	static inline bool ready_schedule_islocked(void) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
		return kernelTLS().sched_lock;
	}

	static inline bool ready_mutate_islocked() {
		return __scheduler_lock.lock.write_lock;
	}
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
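
// Usage sketch (illustrative): the value returned by ready_mutate_lock() must
// be handed back to ready_mutate_unlock():
//     uint_fast32_t last = ready_mutate_lock();
//     // ... add or remove ready-queues ...
//     ready_mutate_unlock( last );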

//-----------------------------------------------------------------------
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
// For convenience, also acquires the lock
static inline [unsigned, uint_fast32_t] ready_mutate_register() {
	unsigned id = register_proc_id();
	uint_fast32_t last = ready_mutate_lock();
	return [id, last];
}

// Unregister a processor from a given cluster using its id
// assumes the lock is acquired
static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
	ready_mutate_unlock( last_s );
	unregister_proc_id( id );
}
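
// Usage sketch (illustrative, Cforall tuple return):
//     unsigned id; uint_fast32_t last;
//     [id, last] = ready_mutate_register();
//     // ... mutate the cluster, then tear down in reverse ...
//     ready_mutate_unregister( id, last );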

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	lock( this.lock __cfaabi_dbg_ctx2 );

	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline bool try_lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	if(try_lock( this.lock __cfaabi_dbg_ctx2 )) {
		// success
		/* paranoid */ verify( ! __preemption_enabled() );
		return true;
	}

	// failed to lock
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
	return false;
}

static inline void unlock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	unlock(this.lock);

	// Release the global lock, which we acquired when locking
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
}
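
// Note the lock ordering above: the global reader lock is taken before the
// idle lock and released after it, and try_lock()'s failure path releases the
// reader lock again, so callers never hold the idle lock alone.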

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint);

//-----------------------------------------------------------------------
// pop thread from the local queues of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from any ready queue of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

//-----------------------------------------------------------------------
// search all ready queues of a cluster for any thread
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);
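
// Usage sketch (illustrative): a scheduler loop tries the cheap local pop
// first and widens on failure, tolerating spurious 0p results:
//     thread$ * thrd = pop_fast( cltr );
//     if( 0p == thrd ) thrd = pop_slow( cltr );
//     if( 0p == thrd ) thrd = pop_search( cltr );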

//-----------------------------------------------------------------------
// get the preferred ready queue (lane) for a new thread
unsigned ready_queue_new_preferred();

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

//-----------------------------------------------------------------------
// Close and deconstruct the ready queue of a cluster
void ready_queue_close(struct cluster * cltr);

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //