//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb 4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include <stdbool.h>
#include <stdint.h>

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

extern "C" {
#include <pthread.h>
#include <semaphore.h>
}

//-----------------------------------------------------------------------------
// Locks
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t($thread) waiting;
};

void  ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool   P (semaphore & this);
bool   V (semaphore & this);
bool   V (semaphore & this, unsigned count);
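
// Usage sketch (illustrative only, not part of this interface): a counting
// semaphore guarding a pool of 4 identical resources. P blocks the calling
// thread when the count is exhausted; V releases a waiter if one is queued.
// The boolean results are assumed to report whether a blocking/unblocking
// handoff took place.
//
//   semaphore tokens = { 4 };
//   void use_resource() {
//       P( tokens );                 // acquire a token, may block
//       // ... access the shared resource ...
//       V( tokens );                 // return the token, may wake a waiter
//   }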


//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor
coroutine processorCtx_t {
	struct processor * proc;
};

// Wrapper around kernel threads
struct processor {
	// Main state
	// Coroutine ctx that keeps the state of the processor
	struct processorCtx_t runner;

	// Cluster from which to get threads
	struct cluster * cltr;
	unsigned int id;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// RunThread data
	// Action to take after a thread has run
	$thread * destroyer;

	// Preemption data
	// Node which is added in the discrete event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region and the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore)
	__bin_sem_t idle;

	// Termination
	// Set to true to notify the processor it should terminate
	volatile bool do_terminate;

	// Termination synchronisation (user semaphore)
	semaphore terminated;

	// pthread stack
	void * stack;

	// Linked-list fields
	struct __dbg_node_cltr {
		processor * next;
		processor * prev;
	} node;

#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor
	const char * last_enable;
#endif
};

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[])     { this{name, *mainCluster }; }

static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }
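
// Usage sketch (illustrative only): processors are presumed to begin running
// ready user threads as soon as they are constructed and to stop when they go
// out of scope, with the destructor synchronizing on termination.
//
//   {
//       processor p0;                          // anonymous processor on mainCluster
//       processor p1 = { "Worker-1" };         // named processor on mainCluster
//       // ... user threads created here run on mainCluster ...
//   }                                          // ^?{} waits for both kernel threads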

//-----------------------------------------------------------------------------
// I/O
struct __io_data;

#define CFA_CLUSTER_IO_POLLER_USER_THREAD 1 << 0
// #define CFA_CLUSTER_IO_POLLER_KERNEL_SIDE 1 << 1


//-----------------------------------------------------------------------------
// Cluster Tools

// Cells used by the reader-writer lock
// while not generic, it only relies on an opaque pointer
struct __processor_id;

// Reader-writer lock protecting the ready-queue
// while this lock is mostly generic, some aspects
// have been hard-coded for the ready-queue for
// simplicity and performance
struct __clusterRWLock_t {
	// total cachelines allocated
	unsigned int max;

	// cachelines currently in use
	volatile unsigned int alloc;

	// cachelines ready to iterate over
	// (!= alloc when a thread is in the second half of doregister)
	volatile unsigned int ready;

	// writer lock
	volatile bool lock;

	// data pointer
	__processor_id * data;
};

void  ?{}(__clusterRWLock_t & this);
void ^?{}(__clusterRWLock_t & this);

// Intrusive lanes used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t {
	// spin lock protecting the queue
	volatile bool lock;

	// anchor for the head and the tail of the queue
	struct __sentinel_t {
		// Linked-list fields
		// intrusive link field for threads
		// must be exactly as in $thread
		__thread_desc_link link;
	} before, after;

#if defined(__CFA_WITH_VERIFY__)
	// id of the last processor to acquire the lock
	// needed only to check for mutual exclusion violations
	unsigned int last_id;

	// number of items on this list
	// needed only to check for deadlocks
	unsigned int count;
#endif

	// Optional statistic counters
#if !defined(__CFA_NO_SCHED_STATS__)
	struct __attribute__((aligned(64))) {
		// difference between the number of pushes and pops
		ssize_t diff;

		// total number of pushes and pops
		size_t push;
		size_t pop ;
	} stat;
#endif
};

void  ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

typedef unsigned long long __cfa_readyQ_mask_t;

// enum {
// 	__cfa_ready_queue_mask_size = (64 - sizeof(size_t)) / sizeof(size_t),
// 	__cfa_max_ready_queues = __cfa_ready_queue_mask_size * 8 * sizeof(size_t)
// };

#define __cfa_lane_mask_size ((64 - sizeof(size_t)) / sizeof(__cfa_readyQ_mask_t))
#define __cfa_max_lanes (__cfa_lane_mask_size * 8 * sizeof(__cfa_readyQ_mask_t))
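
// For example, on an LP64 target where sizeof(size_t) == 8 and
// sizeof(__cfa_readyQ_mask_t) == 8 (unsigned long long):
//   __cfa_lane_mask_size = (64 - 8) / 8     = 7 mask words
//   __cfa_max_lanes      = 7 * 8 * 8 (bits) = 448 lanes
// i.e., the mask array plus the size_t count below appear sized to share a
// single 64-byte cacheline.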

// TODO: adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __attribute__((aligned(128))) __ready_queue_t {
	// Data tracking how many/which lanes are used
	// Aligned to 128 for cache locality
	struct {
		// number of non-empty lanes
		volatile size_t count;

		// bit mask, set bits identify which lanes are non-empty
		volatile __cfa_readyQ_mask_t mask[ __cfa_lane_mask_size ];
	} used;

	// Data tracking the actual lanes
	// On a separate cacheline from the used struct since
	// used can change on each push/pop but this data
	// only changes on shrink/grow
	struct __attribute__((aligned(64))) {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;

	// Statistics
#if !defined(__CFA_NO_STATISTICS__)
	__attribute__((aligned(64))) struct {
		struct {
			// Push statistics
			struct {
				// number of attempts at pushing something
				volatile size_t attempt;

				// number of successful pushes
				volatile size_t success;
			} push;

			// Pop statistics
			struct {
				// number of reads of the mask
				// picking an empty __cfa_readyQ_mask_t counts here
				// but not as an attempt
				volatile size_t maskrds;

				// number of attempts at popping something
				volatile size_t attempt;

				// number of successful pops
				volatile size_t success;
			} pop;
		} pick;

		// stats on the "used" struct of the queue
		// tracks the average number of queues that are not empty
		// when pushing / popping
		struct {
			volatile size_t value;
			volatile size_t count;
		} used;
	} global_stats;

#endif
};

void  ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);

//-----------------------------------------------------------------------------
// Cluster
struct cluster {
	// Ready queue locks
	__clusterRWLock_t ready_lock;

	// Ready queue for threads
	__ready_queue_t ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of processors
	__spinlock_t idle_lock;
	__dllist_t(struct processor) procs;
	__dllist_t(struct processor) idles;
	unsigned int nprocessors;

	// List of threads
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Linked-list fields
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	struct __io_data * io;

#if !defined(__CFA_NO_STATISTICS__)
	bool print_stats;
#endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, int flags);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this)                                      { this{"Anonymous Cluster", default_preemption(), 0}; }
static inline void ?{} (cluster & this, Duration preemption_rate)            { this{"Anonymous Cluster", preemption_rate, 0}; }
static inline void ?{} (cluster & this, const char name[])                   { this{name, default_preemption(), 0}; }
static inline void ?{} (cluster & this, int flags)                           { this{"Anonymous Cluster", default_preemption(), flags}; }
static inline void ?{} (cluster & this, Duration preemption_rate, int flags) { this{"Anonymous Cluster", preemption_rate, flags}; }
static inline void ?{} (cluster & this, const char name[], int flags)        { this{name, default_preemption(), flags}; }
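
// Usage sketch (illustrative only): a dedicated cluster with its own kernel
// threads. The backtick `ms` duration literal is assumed from the CFA time
// library, and the flags parameter reuses CFA_CLUSTER_IO_POLLER_USER_THREAD
// defined above (pass 0 for the defaults).
//
//   cluster cl = { "MyCluster", 10`ms, CFA_CLUSTER_IO_POLLER_USER_THREAD };
//   processor p0 = { "MyCluster-0", cl };
//   processor p1 = { "MyCluster-1", cl };
//   // threads created on cl are now scheduled across p0 and p1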

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
	static inline void print_stats_at_exit( cluster & this ) {
		this.print_stats = true;
	}
#endif
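
// Usage sketch (illustrative only): request that scheduler statistics be
// reported when the cluster is torn down. Only available when the runtime is
// built with statistics enabled; the exact reporting point is presumed from
// the function name.
//
//   cluster cl = { "StatsCluster" };
//   print_stats_at_exit( cl );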

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //