//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#if !defined(__cforall_thread__)
#error kernel_private.hfa should only be included in libcfathread source
#endif

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

extern "C" {
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	#include <rseq/rseq.h>
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	#include <linux/rseq.h>
#else
	#ifndef _GNU_SOURCE
	#error kernel_private.hfa requires _GNU_SOURCE
	#endif
	#include <sched.h>
#endif
}

//-----------------------------------------------------------------------------
// Scheduler
extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts( bool poll = true );
}

void schedule_thread$( thread$ *, unpark_hint hint ) __attribute__((nonnull (1)));

extern bool __preemption_enabled();

// Release/wake-up the resources held by the given thread
void __thread_finish( thread$ * thrd );

//-----------------------------------------------------------------------------
// Hardware

#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	// No data needed
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	extern "Cforall" {
		extern __attribute__((aligned(128))) thread_local volatile struct rseq __cfaabi_rseq;
	}
#else
	// No data needed
#endif

static inline int __kernel_getcpu() {
	/* paranoid */ verify( ! __preemption_enabled() );
#if   defined(CFA_HAVE_LINUX_LIBRSEQ)
	return rseq_current_cpu();
#elif defined(CFA_HAVE_LINUX_RSEQ_H)
	int r = __cfaabi_rseq.cpu_id;
	/* paranoid */ verify( r >= 0 );
	return r;
#else
	return sched_getcpu();
#endif
}

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
	extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block
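
// Illustrative sketch of how the ticket arbitrates a park/unpark race; this
// is NOT the actual park/unpark code (which lives elsewhere) and the field
// name `ticket` is an assumption for this sketch:
//
//   // unpark side: bump the ticket and act on the previous state
//   int old = __atomic_fetch_add( &thrd->ticket, 1, __ATOMIC_SEQ_CST );
//   if( old == TICKET_BLOCKED ) schedule_thread$( thrd, hint );   // already parked: wake it
//   // old == TICKET_RUNNING : not parked yet; park will see TICKET_UNBLOCK and skip blocking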

//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct thread$ & thrd );
void unregister( struct cluster * cltr, struct thread$ & thrd );

//-----------------------------------------------------------------------------
// I/O
$io_arbiter * create(void);
void destroy($io_arbiter *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
unsigned register_proc_id( void );

// Unregister a processor from a given cluster using its id, getting back the original pointer
void unregister_proc_id( unsigned );

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			Pause();
	}
	/* paranoid */ verify(*ll);
}

// Non-Blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
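
// Illustrative usage (hypothetical caller; `flag` is invented for this sketch):
//
//   static volatile bool flag = false;
//   __atomic_acquire( &flag );   // spin until the flag is ours
//   /* ... critical section ... */
//   __atomic_unlock( &flag );    // store-release hands the flag back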

//-----------------------------------------------------------------------
// Reader-Writer lock protecting the ready-queues
// While this lock is mostly generic, some aspects
// have been hard-coded for the ready-queue, for
// simplicity and performance.
struct __scheduler_RWLock_t {
	// total cachelines allocated
	unsigned int max;

	// cachelines currently in use
	volatile unsigned int alloc;

	// cachelines ready to iterate over
	// (!= alloc when a thread is in the second half of doregister)
	volatile unsigned int ready;

	// writer lock
	volatile bool write_lock;

	// data pointer
	volatile bool * volatile * data;
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t * __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( ! kernelTLS().in_sched_lock );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
		Pause();

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &kernelTLS().sched_lock );
	/* paranoid */ verify(kernelTLS().sched_lock);

	#ifdef __CFA_WITH_VERIFY__
	// Debug, check if this is owned for reading
	kernelTLS().in_sched_lock = true;
	#endif
}

static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
	/* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
	/* paranoid */ verify( kernelTLS().sched_lock );
	/* paranoid */ verify( kernelTLS().in_sched_lock );
	#ifdef __CFA_WITH_VERIFY__
	// Debug, check if this is owned for reading
	kernelTLS().in_sched_lock = false;
	#endif
	__atomic_unlock(&kernelTLS().sched_lock);
}
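
// Illustrative read-side pattern (hypothetical caller); the verifies above
// require preemption to be disabled around the critical section:
//
//   disable_interrupts();
//   ready_schedule_lock();
//   /* ... schedule through the ready queue ... */
//   ready_schedule_unlock();
//   enable_interrupts();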

#ifdef __CFA_WITH_VERIFY__
static inline bool ready_schedule_islocked(void) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
	return kernelTLS().sched_lock;
}

static inline bool ready_mutate_islocked() {
	return __scheduler_lock->write_lock;
}
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
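
// Illustrative write-side pattern (hypothetical caller): the value returned
// by ready_mutate_lock must be handed back to ready_mutate_unlock.
//
//   uint_fast32_t last = ready_mutate_lock();
//   /* ... add or remove queues ... */
//   ready_mutate_unlock( last );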

//-----------------------------------------------------------------------
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
// For convenience, also acquires the lock
static inline [unsigned, uint_fast32_t] ready_mutate_register() {
	unsigned id = register_proc_id();
	uint_fast32_t last = ready_mutate_lock();
	return [id, last];
}

// Unregister a processor from a given cluster using its id, getting back the original pointer
// Assumes the lock is acquired
static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
	ready_mutate_unlock( last_s );
	unregister_proc_id( id );
}

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	// Simple counting lock, acquired by incrementing the counter
	// to an odd number
	for() {
		uint64_t l = this.lock;
		if(
			(0 == (l % 2))
			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
		) return;
		Pause();
	}

	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void unlock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	/* paranoid */ verify( 1 == (this.lock % 2) );
	// Simple counting lock, released by incrementing to an even number
	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );

	// Release the global lock, which we acquired when locking
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
}
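
// Illustrative state sequence for the counting lock above: the low bit of
// `lock` encodes ownership, so the counter alternates
//
//   0 (unlocked) -CAS-> 1 (locked) -fetch_add-> 2 (unlocked) -CAS-> 3 (locked) ...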

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, unpark_hint hint);

//-----------------------------------------------------------------------
// pop thread from the local queues of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from any ready queue of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);

//-----------------------------------------------------------------------
// search all ready queues of a cluster for any thread
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);
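
// Illustrative fallback order (hypothetical scheduler loop): because pop_fast
// and pop_slow may fail spuriously, a caller can escalate before idling, e.g.:
//
//   thread$ * thrd = pop_fast( cltr );       // cheap: local queues first
//   if( !thrd ) thrd = pop_slow( cltr );     // any ready queue
//   if( !thrd ) thrd = pop_search( cltr );   // exhaustive search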

//-----------------------------------------------------------------------
// get preferred ready queue for a new thread
unsigned ready_queue_new_preferred();

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //