//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#if !defined(__cforall_thread__)
#error kernel_private.hfa should only be included in libcfathread source
#endif

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

extern "C" {
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    #include <rseq/rseq.h>
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
    #include <linux/rseq.h>
#else
    #ifndef _GNU_SOURCE
    #error kernel_private.hfa requires _GNU_SOURCE
    #endif
    #include <sched.h>
#endif
}

//-----------------------------------------------------------------------------
// Scheduler
extern "C" {
    void disable_interrupts() OPTIONAL_THREAD;
    void enable_interrupts( bool poll = true );
}
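
// Illustrative sketch (not a definitive contract): scheduler internals are
// expected to run with preemption off, bracketed as
//     disable_interrupts();
//     ... touch scheduler state ...
//     enable_interrupts();        // poll = true also checks pending preemptions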

void schedule_thread$( thread$ * ) __attribute__((nonnull (1)));

extern bool __preemption_enabled();

// Release the thread's resources and wake up any thread waiting for it to finish
void __thread_finish( thread$ * thrd );

//-----------------------------------------------------------------------------
// Hardware

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
    // No data needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
    extern "Cforall" {
        extern __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq;
    }
#else
    // No data needed
#endif

static inline int __kernel_getcpu() {
    /* paranoid */ verify( ! __preemption_enabled() );
    #if   defined(CFA_HAVE_LINUX_LIBRSEQ)
        return rseq_current_cpu();
    #elif defined(CFA_HAVE_LINUX_RSEQ_H)
        int r = __cfaabi_rseq.cpu_id;
        /* paranoid */ verify( r >= 0 );
        return r;
    #else
        return sched_getcpu();
    #endif
}
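
// Illustrative sketch (assumption about intended use): the verify() above
// means callers must already have preemption disabled, otherwise the id
// could be stale before it is used, e.g.
//     disable_interrupts();
//     int cpu = __kernel_getcpu();     // stable: cannot migrate while disabled
//     ... index per-cpu data with cpu ...
//     enable_interrupts();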

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
    void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
    extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
    extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block
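
// Sketch of the intended handshake (inferred from the comments above): a
// parking thread moves its ticket RUNNING -> BLOCKED; an unpark of a BLOCKED
// thread wakes it, while an unpark that wins the race instead moves the
// ticket RUNNING -> UNBLOCK, so the next park sees UNBLOCK and skips blocking.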

//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct thread$ & thrd );
void unregister( struct cluster * cltr, struct thread$ & thrd );

//-----------------------------------------------------------------------------
// I/O
$io_arbiter * create(void);
void destroy($io_arbiter *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-free registering/unregistering of threads
// Register a processor with a given cluster and get its unique id in return
unsigned register_proc_id( void );

// Unregister a processor from a given cluster using its id, getting back the original pointer
void unregister_proc_id( unsigned );

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
    while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
        while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
            Pause();
    }
    /* paranoid */ verify(*ll);
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
    return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
    /* paranoid */ verify(*ll);
    __atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
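
// Illustrative sketch (test-and-test-and-set spinlock): the blocking acquire
// spins on a relaxed load until the lock looks free, then races to exchange;
// typical use:
//     volatile bool ll = false;
//     __atomic_acquire( &ll );              // blocks until owned
//     ... critical section ...
//     __atomic_unlock( &ll );
//     if( __atomic_try_acquire( &ll ) ) ... // non-blocking variant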
//-----------------------------------------------------------------------
// Reader-writer lock protecting the ready-queues.
// While this lock is mostly generic, some aspects have been hard-coded
// for the ready-queue, for simplicity and performance.
struct __scheduler_RWLock_t {
    // total cachelines allocated
    unsigned int max;

    // cachelines currently in use
    volatile unsigned int alloc;

    // cachelines ready to iterate over
    // (!= alloc when a thread is in the second half of doregister)
    volatile unsigned int ready;

    // writer lock
    volatile bool write_lock;

    // data pointer
    volatile bool * volatile * data;
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t * __scheduler_lock;
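
// Design note (inferred, see ready_schedule_lock below): each registered
// processor owns one cacheline-sized reader lock, published in data at its
// sched_id; readers only touch their own cacheline, so the read side stays
// contention-free, while a writer raises write_lock and acquires every
// reader lock before mutating.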

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( ! kernelTLS().in_sched_lock );
    /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

    // Step 1 : make sure no writers are in the middle of the critical section
    while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
        Pause();

    // Fence needed because we don't want to start trying to acquire the lock
    // before we read a false.
    // Not needed on x86
    // std::atomic_thread_fence(std::memory_order_seq_cst);

    // Step 2 : acquire our local lock
    __atomic_acquire( &kernelTLS().sched_lock );
    /* paranoid */ verify( kernelTLS().sched_lock );

    #ifdef __CFA_WITH_VERIFY__
        // Debug, check if this is owned for reading
        kernelTLS().in_sched_lock = true;
    #endif
}

static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    /* paranoid */ verify( ! __preemption_enabled() );
    /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
    /* paranoid */ verify( kernelTLS().sched_lock );
    /* paranoid */ verify( kernelTLS().in_sched_lock );
    #ifdef __CFA_WITH_VERIFY__
        // Debug, check if this is owned for reading
        kernelTLS().in_sched_lock = false;
    #endif
    __atomic_unlock(&kernelTLS().sched_lock);
}
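
// Illustrative sketch (assumed call pattern): reader-side operations on the
// ready-queues are bracketed so lanes cannot be grown/shrunk underneath them:
//     disable_interrupts();
//     ready_schedule_lock();
//     push( cltr, thrd, true );     // see Ready-Queue API below
//     ready_schedule_unlock();
//     enable_interrupts();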

#ifdef __CFA_WITH_VERIFY__
    static inline bool ready_schedule_islocked(void) {
        /* paranoid */ verify( ! __preemption_enabled() );
        /* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
        return kernelTLS().sched_lock;
    }

    static inline bool ready_mutate_islocked() {
        return __scheduler_lock->write_lock;
    }
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
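
// Illustrative sketch (assumed call pattern): writers pass the value returned
// by lock back to unlock, e.g. around a structural change:
//     uint_fast32_t last = ready_mutate_lock();
//     ready_queue_grow( cltr );     // see Ready-Queue API below
//     ready_mutate_unlock( last );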

//-----------------------------------------------------------------------
// Lock-free registering/unregistering of threads
// Register a processor with a given cluster and get its unique id in return;
// for convenience, also acquires the lock
static inline [unsigned, uint_fast32_t] ready_mutate_register() {
    unsigned id = register_proc_id();
    uint_fast32_t last = ready_mutate_lock();
    return [id, last];
}

// Unregister a processor from a given cluster using its id, getting back the original pointer;
// assumes the lock is acquired
static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
    ready_mutate_unlock( last_s );
    unregister_proc_id( id );
}

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
    /* paranoid */ verify( ! __preemption_enabled() );

    // Start by locking the global RWlock so that we know no-one is
    // adding/removing processors while we mess with the idle lock
    ready_schedule_lock();

    // Simple counting lock, acquired by incrementing the counter
    // to an odd number
    for() {
        uint64_t l = this.lock;
        if(
            (0 == (l % 2))
            && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
        ) return;
        Pause();
    }

    /* paranoid */ verify( ! __preemption_enabled() );
}

static inline void unlock(__cluster_proc_list & this) {
    /* paranoid */ verify( ! __preemption_enabled() );

    /* paranoid */ verify( 1 == (this.lock % 2) );
    // Simple counting lock, released by incrementing to an even number
    __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );

    // Release the global lock, which we acquired when locking
    ready_schedule_unlock();

    /* paranoid */ verify( ! __preemption_enabled() );
}
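
// Illustrative sketch (assumed call pattern, field name hypothetical): the
// pair above behaves like a mutex around a cluster's idle-processor list:
//     lock( cltr->procs );          // assuming the list is reachable as cluster.procs
//     ... add/remove an idle processor ...
//     unlock( cltr->procs );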

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool local);

//-----------------------------------------------------------------------
// pop thread from the local queues of a cluster
// returns 0p if empty
// may return 0p spuriously
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from any ready queue of a cluster
// returns 0p if empty
// may return 0p spuriously
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

//-----------------------------------------------------------------------
// search all ready queues of a cluster for any thread
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);
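
// Illustrative sketch (assumed scheduling loop): try the cheap, possibly
// spurious pops first, falling back to the exhaustive search before idling:
//     thread$ * thrd = pop_fast( cltr );       // local lanes
//     if( ! thrd ) thrd = pop_slow( cltr );    // any lane
//     if( ! thrd ) thrd = pop_search( cltr );  // exhaustive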

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);


// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //