#pragma once

#include <locks.hfa>
#include <limits.hfa>
#include <kernel.hfa>
#include <iofwd.hfa>

#ifdef __CFA_DEBUG__
#define CFA_DEBUG( stmt ) stmt
#else
#define CFA_DEBUG( stmt )
#endif // CFA_DEBUG
---|

// Define the default number of processors created in the executor. Must be greater than 0.
#define __DEFAULT_EXECUTOR_PROCESSORS__ 2

// Define the default number of threads created in the executor. Must be greater than 0.
#define __DEFAULT_EXECUTOR_WORKERS__ 2

// Define the default number of executor request-queues (mailboxes) written to by actors and serviced by the
// actor-executor threads. Must be greater than 0.
#define __DEFAULT_EXECUTOR_RQUEUES__ 4

// Define if the executor is created in a separate cluster
#define __DEFAULT_EXECUTOR_SEPCLUS__ false

// Define the default initial size of the copy-queue buffers
#define __DEFAULT_EXECUTOR_BUFSIZE__ 10

#define __STEAL 0 // work-stealing toggle (tested with #ifdef, so comment out to disable); disjoint from the toggles above

// work-stealing heuristic selection (set exactly one to 1)
// #define RAND 0
#define SEARCH 1

// show stats
// #define STATS
---|

// forward declarations
struct actor;
struct message;
struct executor;

enum Allocation { Nodelete, Delete, Destroy, Finished }; // allocation status
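
// Semantics of each Allocation status after a receive completes (derived from
// check_actor and check_message below):
//   Nodelete - the object stays live and can be used again
//   Delete   - the object was heap allocated: run its dtor and free it
//   Destroy  - run the dtor only; the storage is managed elsewhere
//   Finished - mark the object terminated; no dtor, no free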
---|

typedef Allocation (*__receive_fn)(actor &, message &);
struct request {
	actor * receiver;
	message * msg;
	__receive_fn fn;
	bool stop;
};

static inline void ?{}( request & this ) { this.stop = true; } // default ctor makes a sentinel
static inline void ?{}( request & this, actor * receiver, message * msg, __receive_fn fn ) {
	this.receiver = receiver;
	this.msg = msg;
	this.fn = fn;
	this.stop = false;
}
static inline void ?{}( request & this, request & copy ) {
	this.receiver = copy.receiver;
	this.msg = copy.msg;
	this.fn = copy.fn;
	this.stop = copy.stop;
}
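
// A default-constructed request is a stop sentinel; the executor dtor inserts
// these to shut workers down. Illustrative (hypothetical names a, m, fn):
//   request sentinel;            // stop == true
//   request work{ &a, &m, fn };  // stop == false, carries a deliverable message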
---|

// Vector-like data structure that supports O(1) queue operations with no bound on size.
// Assumes gulping behaviour: once a remove occurs, removes continue until the queue is
// empty before the next insert.
struct copy_queue {
	request * buffer;
	size_t count, buffer_size, index, utilized, last_size;
};
static inline void ?{}( copy_queue & this ) {}
static inline void ?{}( copy_queue & this, size_t buf_size ) with(this) {
	buffer_size = buf_size;
	buffer = aalloc( buffer_size );
	count = 0;
	utilized = 0;
	index = 0;
	last_size = 0;
}
static inline void ^?{}( copy_queue & this ) with(this) { adelete(buffer); }

static inline void insert( copy_queue & this, request & elem ) with(this) {
	if ( count >= buffer_size ) { // grow the buffer geometrically
		last_size = buffer_size;
		buffer_size = 2 * buffer_size;
		buffer = realloc( buffer, sizeof( request ) * buffer_size );
		/* paranoid */ verify( buffer );
	}
	memcpy( &buffer[count], &elem, sizeof(request) );
	count++;
}

// once you start removing you need to remove all elements
// it is not supported to call insert() before the queue is fully empty
static inline request & remove( copy_queue & this ) with(this) {
	if ( count > 0 ) {
		count--;
		size_t old_idx = index;
		index = count == 0 ? 0 : index + 1;
		return buffer[old_idx];
	}
	return *0p; // null reference signals an empty queue; callers check &req
}

// try to reclaim some memory by shrinking the buffer one slot when the last
// gulp did not use the full previous buffer (never shrinks below 4 slots)
static inline void reclaim( copy_queue & this ) with(this) {
	if ( utilized >= last_size || buffer_size <= 4 ) { utilized = 0; return; }
	utilized = 0;
	buffer_size--;
	buffer = realloc( buffer, sizeof( request ) * buffer_size );
}

static inline bool isEmpty( copy_queue & this ) with(this) { return count == 0; }
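
// Minimal usage sketch of the gulp discipline (hypothetical names some_actor,
// some_msg, some_fn):
//   copy_queue q{ 8 };                        // 8-slot initial buffer
//   request r{ &some_actor, &some_msg, some_fn };
//   insert( q, r );                           // fill phase: may grow the buffer
//   while ( ! isEmpty( q ) ) {                // drain phase: remove until empty
//       request & next = remove( q );
//       // ... process next ...
//   }
//   reclaim( q );                             // optionally shrink the buffer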
---|

struct work_queue {
	__spinlock_t mutex_lock;
	copy_queue * owned_queue;      // copy queue allocated and cleaned up by this work_queue
	copy_queue * c_queue;          // current queue
	volatile bool being_processed; // flag to prevent concurrent processing
	#ifdef STATS
	unsigned int id;
	size_t missed;                 // transfers skipped because the being_processed flag was up
	#endif
}; // work_queue
static inline void ?{}( work_queue & this, size_t buf_size, unsigned int i ) with(this) {
	owned_queue = alloc();         // allocated separately to avoid false sharing
	(*owned_queue){ buf_size };
	c_queue = owned_queue;
	being_processed = false;
	#ifdef STATS
	id = i;
	missed = 0;
	#endif
}

// clean up copy_queue owned by this work_queue
static inline void ^?{}( work_queue & this ) with(this) { delete( owned_queue ); }

static inline void insert( work_queue & this, request & elem ) with(this) {
	lock( mutex_lock __cfaabi_dbg_ctx2 );
	insert( *c_queue, elem );
	unlock( mutex_lock );
} // insert

static inline void transfer( work_queue & this, copy_queue ** transfer_to ) with(this) {
	lock( mutex_lock __cfaabi_dbg_ctx2 );
	#ifdef __STEAL

	// check if queue is being processed elsewhere
	if ( unlikely( being_processed ) ) {
		#ifdef STATS
		missed++;
		#endif
		unlock( mutex_lock );
		return;
	}

	being_processed = c_queue->count != 0;
	#endif // __STEAL

	c_queue->utilized = c_queue->count;

	// swap copy queue ptrs
	copy_queue * temp = *transfer_to;
	*transfer_to = c_queue;
	c_queue = temp;
	unlock( mutex_lock );
} // transfer
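
// Gulp handoff sketch (hypothetical names): the caller trades its empty queue for
// the work_queue's filled one, so senders never block behind a draining queue:
//   copy_queue * mine = &my_empty_queue;
//   transfer( wq, &mine );  // on success, 'mine' now points at the filled queue;
//                           // if wq was already being processed, nothing changes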
---|

// needed since some info needs to persist past worker lifetimes
struct worker_info {
	volatile unsigned long long stamp;
	#ifdef STATS
	size_t stolen_from, try_steal, stolen, failed_swaps, msgs_stolen;
	unsigned long long processed;
	size_t gulps;
	#endif
};
static inline void ?{}( worker_info & this ) {
	#ifdef STATS
	this.stolen_from = 0;
	this.try_steal = 0;    // attempts to steal
	this.stolen = 0;       // successful steals
	this.processed = 0;    // requests processed
	this.gulps = 0;        // number of gulps
	this.failed_swaps = 0; // steal swap failures
	this.msgs_stolen = 0;  // number of messages stolen
	#endif
	this.stamp = rdtscl();
}
---|

// #ifdef STATS
// unsigned int * stolen_arr;
// unsigned int * replaced_queue;
// #endif
thread worker {
	work_queue ** request_queues;
	copy_queue * current_queue;
	executor * executor_;
	unsigned int start, range;
	int id;
};

#ifdef STATS
// aggregate counters for statistics
size_t __total_tries = 0, __total_stolen = 0, __total_workers, __all_gulps = 0,
	__total_failed_swaps = 0, __all_processed = 0, __num_actors_stats = 0, __all_msgs_stolen = 0;
#endif
static inline void ?{}( worker & this, cluster & clu, work_queue ** request_queues, copy_queue * current_queue, executor * executor_,
	unsigned int start, unsigned int range, int id ) {
	((thread &)this){ clu };
	this.request_queues = request_queues; // array of all queues
	this.current_queue = current_queue;   // currently gulped queue (start with empty queue to use in swap later)
	this.executor_ = executor_;           // pointer to current executor
	this.start = start;                   // start of worker's subrange of request_queues
	this.range = range;                   // size of worker's subrange of request_queues
	this.id = id;                         // worker's id and index in array of workers
}
---|

static bool no_steal = false;
struct executor {
	cluster * cluster;               // if workers execute on separate cluster
	processor ** processors;         // array of virtual processors adding parallelism for workers
	work_queue * request_queues;     // master array of work request queues
	copy_queue * local_queues;       // array of all worker local queues to avoid deletion race
	work_queue ** worker_req_queues; // secondary array of work queues to allow for swapping
	worker ** workers;               // array of workers executing work requests
	worker_info * w_infos;           // array of info about each worker
	unsigned int nprocessors, nworkers, nrqueues; // number of processors/threads/request queues
	bool seperate_clus;              // use same or separate cluster for executor
}; // executor
---|

// #ifdef STATS
// __spinlock_t out_lock;
// #endif
static inline void ^?{}( worker & mutex this ) with(this) {
	#ifdef STATS
	__atomic_add_fetch(&__all_gulps, executor_->w_infos[id].gulps,__ATOMIC_SEQ_CST);
	__atomic_add_fetch(&__all_processed, executor_->w_infos[id].processed,__ATOMIC_SEQ_CST);
	__atomic_add_fetch(&__all_msgs_stolen, executor_->w_infos[id].msgs_stolen,__ATOMIC_SEQ_CST);
	__atomic_add_fetch(&__total_tries, executor_->w_infos[id].try_steal, __ATOMIC_SEQ_CST);
	__atomic_add_fetch(&__total_stolen, executor_->w_infos[id].stolen, __ATOMIC_SEQ_CST);
	__atomic_add_fetch(&__total_failed_swaps, executor_->w_infos[id].failed_swaps, __ATOMIC_SEQ_CST);

	// per-worker steal stats (uncomment alongside the lock above this routine to print)
	// lock( out_lock __cfaabi_dbg_ctx2 );
	// printf("Worker id: %d, processed: %llu messages, attempted %lu, stole: %lu, stolen from: %lu\n", id, processed, try_steal, stolen, __atomic_add_fetch(&executor_->w_infos[id].stolen_from, 0, __ATOMIC_SEQ_CST) );
	// int count = 0;
	// int count2 = 0;
	// for ( i; range ) {
	// 	if ( replaced_queue[start + i] > 0 ){
	// 		count++;
	// 		// printf("%d: %u, ",i, replaced_queue[i]);
	// 	}
	// 	if (__atomic_add_fetch(&stolen_arr[start + i],0,__ATOMIC_SEQ_CST) > 0)
	// 		count2++;
	// }
	// printf("swapped with: %d of %u indices\n", count, executor_->nrqueues / executor_->nworkers );
	// printf("%d of %u indices were stolen\n", count2, executor_->nrqueues / executor_->nworkers );
	// unlock( out_lock );
	#endif
}
---|

static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues, bool seperate_clus, size_t buf_size ) with(this) {
	if ( nrqueues < nworkers ) abort( "nrqueues needs to be >= nworkers\n" );
	this.nprocessors = nprocessors;
	this.nworkers = nworkers;
	this.nrqueues = nrqueues;
	this.seperate_clus = seperate_clus;

	if ( nworkers == nrqueues )
		no_steal = true;

	#ifdef STATS
	// stolen_arr = aalloc( nrqueues );
	// replaced_queue = aalloc( nrqueues );
	__total_workers = nworkers;
	#endif

	if ( seperate_clus ) {
		cluster = alloc();
		(*cluster){};
	} else cluster = active_cluster();

	request_queues = aalloc( nrqueues );
	worker_req_queues = aalloc( nrqueues );
	for ( i; nrqueues ) {
		request_queues[i]{ buf_size, i };
		worker_req_queues[i] = &request_queues[i];
	}

	processors = aalloc( nprocessors );
	for ( i; nprocessors )
		(*(processors[i] = alloc())){ *cluster };

	local_queues = aalloc( nworkers );
	workers = aalloc( nworkers );
	w_infos = aalloc( nworkers );
	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;

	for ( i; nworkers ) {
		w_infos[i]{};
		local_queues[i]{ buf_size };
	}

	for ( unsigned int i = 0, start = 0, range; i < nworkers; i += 1, start += range ) {
		range = reqPerWorker + ( i < extras ? 1 : 0 );
		(*(workers[i] = alloc())){ *cluster, worker_req_queues, &local_queues[i], &this, start, range, i };
	} // for
}
static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues, bool seperate_clus ) { this{ nprocessors, nworkers, nrqueues, seperate_clus, __DEFAULT_EXECUTOR_BUFSIZE__ }; }
static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers, unsigned int nrqueues ) { this{ nprocessors, nworkers, nrqueues, __DEFAULT_EXECUTOR_SEPCLUS__ }; }
static inline void ?{}( executor & this, unsigned int nprocessors, unsigned int nworkers ) { this{ nprocessors, nworkers, __DEFAULT_EXECUTOR_RQUEUES__ }; }
static inline void ?{}( executor & this, unsigned int nprocessors ) { this{ nprocessors, __DEFAULT_EXECUTOR_WORKERS__ }; }
static inline void ?{}( executor & this ) { this{ __DEFAULT_EXECUTOR_PROCESSORS__ }; }
---|

static inline void ^?{}( executor & this ) with(this) {
	#ifdef __STEAL
	request sentinels[nrqueues];
	for ( unsigned int i = 0; i < nrqueues; i++ ) {
		insert( request_queues[i], sentinels[i] ); // force eventual termination
	} // for
	#else
	request sentinels[nworkers];
	unsigned int reqPerWorker = nrqueues / nworkers, extras = nrqueues % nworkers;
	for ( unsigned int i = 0, step = 0, range; i < nworkers; i += 1, step += range ) {
		range = reqPerWorker + ( i < extras ? 1 : 0 );
		insert( request_queues[step], sentinels[i] ); // force eventual termination
	} // for
	#endif

	for ( i; nworkers )
		delete( workers[i] );

	for ( i; nprocessors ) {
		delete( processors[i] );
	} // for

	#ifdef STATS
	size_t misses = 0;
	for ( i; nrqueues ) {
		misses += worker_req_queues[i]->missed;
	}
	// adelete( stolen_arr );
	// adelete( replaced_queue );
	#endif

	adelete( workers );
	adelete( w_infos );
	adelete( local_queues );
	adelete( request_queues );
	adelete( worker_req_queues );
	adelete( processors );
	if ( seperate_clus ) delete( cluster );

	#ifdef STATS
	printf(" Actor System Stats:\n");
	printf("\tActors Created:\t\t\t\t%lu\n\tMessages Sent:\t\t\t\t%lu\n", __num_actors_stats, __all_processed);
	size_t avg_gulps = __all_gulps == 0 ? 0 : __all_processed / __all_gulps;
	printf("\tGulps:\t\t\t\t\t%lu\n\tAverage Gulp Size:\t\t\t%lu\n\tMissed gulps:\t\t\t\t%lu\n", __all_gulps, avg_gulps, misses);
	printf("\tSteal attempts:\t\t\t\t%lu\n\tSteals:\t\t\t\t\t%lu\n\tSteal failures (no candidates):\t\t%lu\n\tSteal failures (failed swaps):\t\t%lu\n",
		__total_tries, __total_stolen, __total_tries - __total_stolen - __total_failed_swaps, __total_failed_swaps);
	size_t avg_steal = __total_stolen == 0 ? 0 : __all_msgs_stolen / __total_stolen;
	printf("\tMessages stolen:\t\t\t%lu\n\tAverage steal size:\t\t\t%lu\n", __all_msgs_stolen, avg_steal);
	#endif
}
---|

// logically a static field of executor, but it must live at file scope because __get_next_ticket needs it
static size_t __next_ticket = 0;

static inline size_t __get_next_ticket( executor & this ) with(this) {
	#ifdef __CFA_DEBUG__
	size_t temp = __atomic_fetch_add( &__next_ticket, 1, __ATOMIC_SEQ_CST) % nrqueues;

	// MAX is reserved to mark dead actors, so skip it
	if ( unlikely( temp == MAX ) ) temp = __atomic_fetch_add( &__next_ticket, 1, __ATOMIC_SEQ_CST) % nrqueues;
	return temp;
	#else
	return __atomic_fetch_add( &__next_ticket, 1, __ATOMIC_RELAXED) % nrqueues;
	#endif
} // tickets
---|

// TODO: update globals in this file to be static fields once the static fields project is done
static executor * __actor_executor_ = 0p;
static bool __actor_executor_passed = false;       // was an executor passed to start_actor_system
static size_t __num_actors_ = 0;                   // number of actor objects in system
static struct thread$ * __actor_executor_thd = 0p; // used to wake executor after actors finish
struct actor {
	size_t ticket;          // executor-queue handle
	Allocation allocation_; // allocation action
};

static inline void ?{}( actor & this ) with(this) {
	// Once an actor is allocated, it must be sent a message or the actor system cannot stop. Hence, its receive
	// member must be called to end it.
	verifyf( __actor_executor_, "Creating an actor before calling start_actor_system() can cause undefined behaviour.\n" );
	allocation_ = Nodelete;
	ticket = __get_next_ticket( *__actor_executor_ );
	__atomic_fetch_add( &__num_actors_, 1, __ATOMIC_RELAXED );
	#ifdef STATS
	__atomic_fetch_add( &__num_actors_stats, 1, __ATOMIC_SEQ_CST );
	#endif
}
static inline void ^?{}( actor & this ) {}

static inline void check_actor( actor & this ) {
	if ( this.allocation_ != Nodelete ) {
		switch( this.allocation_ ) {
			case Delete: delete( &this ); break;
			case Destroy:
				CFA_DEBUG( this.ticket = MAX; ); // mark as terminated
				^?{}(this);
				break;
			case Finished:
				CFA_DEBUG( this.ticket = MAX; ); // mark as terminated
				break;
			default: ; // stop warning
		}

		if ( unlikely( __atomic_add_fetch( &__num_actors_, -1, __ATOMIC_RELAXED ) == 0 ) ) { // all actors have terminated
			unpark( __actor_executor_thd );
		}
	}
}
---|

struct message {
	Allocation allocation_; // allocation action
};

static inline void ?{}( message & this ) { this.allocation_ = Nodelete; }
static inline void ?{}( message & this, Allocation allocation ) {
	memcpy( &this.allocation_, &allocation, sizeof(allocation) ); // optimization to elide ctor
	verifyf( this.allocation_ != Finished, "The Finished Allocation status is not supported for message types.\n");
}
static inline void ^?{}( message & this ) {
	CFA_DEBUG( if ( this.allocation_ == Nodelete ) printf("A message at location %p was allocated but never sent.\n", &this); )
}

static inline void check_message( message & this ) {
	#ifdef __CFA_DEBUG__
	Allocation temp = this.allocation_;
	this.allocation_ = Finished;
	switch ( temp )
	#else
	switch ( this.allocation_ )
	#endif
	{ // analyze message status
		case Nodelete: break;
		case Delete: delete( &this ); break;
		case Destroy: ^?{}(this); break;
		case Finished: break;
	} // switch
}
static inline void set_allocation( message & this, Allocation state ) { this.allocation_ = state; }
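
// Typical message lifetime control (illustrative):
//   message m;                      // Nodelete by default: the caller manages it
//   message * pm = alloc();
//   (*pm){ Delete };                // heap message reclaimed after its receive
//   set_allocation( m, Destroy );   // or change the action later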
---|

static inline void deliver_request( request & this ) {
	this.receiver->allocation_ = this.fn( *this.receiver, *this.msg );
	check_message( *this.msg );
	check_actor( *this.receiver );
}
---|

// Tries to atomically swap two queues; returns 0p if the swap failed and a
// pointer to the newly owned queue if it succeeds.
static inline work_queue * try_swap_queues( worker & this, unsigned int victim_idx, unsigned int my_idx ) with(this) {
	work_queue * my_queue = request_queues[my_idx];
	work_queue * other_queue = request_queues[victim_idx];

	// if the victim queue is 0p then it is in the process of being stolen
	if ( other_queue == 0p ) return 0p;

	// try to set our queue ptr to be 0p; if it fails, someone moved our queue, so return 0p
	if ( !__atomic_compare_exchange_n( &request_queues[my_idx], &my_queue, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) )
		return 0p;

	// try to set the victim queue ptr to be our queue ptr; if it fails, someone moved the victim queue, so fix up and return 0p
	if ( !__atomic_compare_exchange_n( &request_queues[victim_idx], &other_queue, my_queue, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {
		/* paranoid */ verify( request_queues[my_idx] == 0p );
		request_queues[my_idx] = my_queue; // reset my queue ptr back to the appropriate value
		return 0p;
	}

	// the swap succeeded, and since our slot holds 0p no one else will touch it, so the last write does not need to be atomic
	request_queues[my_idx] = other_queue;
	return other_queue;
}
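
// Protocol sketch for a worker swapping its slot m with victim slot v:
//   1. CAS( queues[m]: mine -> 0p )     claim own slot; failure means 'mine' moved
//   2. CAS( queues[v]: theirs -> mine ) publish 'mine' at the victim; on failure,
//                                       restore queues[m] = mine and give up
//   3. queues[m] = theirs               slot m still holds 0p, so no other worker
//                                       touches it and a plain store suffices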
---|

// once a worker to steal from has been chosen, choose a queue to steal from
static inline void choose_queue( worker & this, unsigned int victim_id, unsigned int swap_idx ) with(this) {
	// have to calculate victim start and range since the victim may be deleted before us during shutdown
	const unsigned int queues_per_worker = executor_->nrqueues / executor_->nworkers;
	const unsigned int extras = executor_->nrqueues % executor_->nworkers;
	unsigned int vic_start, vic_range;
	if ( extras > victim_id ) {
		vic_range = queues_per_worker + 1;
		vic_start = vic_range * victim_id;
	} else {
		vic_start = extras + victim_id * queues_per_worker;
		vic_range = queues_per_worker;
	}
	unsigned int start_idx = prng( vic_range );

	unsigned int tries = 0;
	work_queue * curr_steal_queue;

	for ( unsigned int i = start_idx; tries < vic_range; i = (i + 1) % vic_range ) {
		tries++;
		curr_steal_queue = request_queues[ i + vic_start ];
		// avoid empty queues and queues that are being operated on
		if ( curr_steal_queue == 0p || curr_steal_queue->being_processed || isEmpty( *curr_steal_queue->c_queue ) )
			continue;

		#ifdef STATS
		curr_steal_queue = try_swap_queues( this, i + vic_start, swap_idx );
		if ( curr_steal_queue ) {
			executor_->w_infos[id].msgs_stolen += curr_steal_queue->c_queue->count;
			executor_->w_infos[id].stolen++;
			// __atomic_add_fetch(&executor_->w_infos[victim_id].stolen_from, 1, __ATOMIC_RELAXED);
			// replaced_queue[swap_idx]++;
			// __atomic_add_fetch(&stolen_arr[ i + vic_start ], 1, __ATOMIC_RELAXED);
		} else {
			executor_->w_infos[id].failed_swaps++;
		}
		#else
		curr_steal_queue = try_swap_queues( this, i + vic_start, swap_idx );
		#endif // STATS

		// only attempt one swap per call, whether or not it succeeded
		return;
	}

	return;
}
---|

// choose a worker to steal from
static inline void steal_work( worker & this, unsigned int swap_idx ) with(this) {
	#if RAND
	unsigned int victim = prng( executor_->nworkers );
	if ( victim == id ) victim = ( victim + 1 ) % executor_->nworkers;
	choose_queue( this, victim, swap_idx );
	#elif SEARCH
	unsigned long long min = MAX; // smaller timestamp means longer since service
	int min_id = 0; // use ints, not uints, to avoid integer underflow without hacky math
	int n_workers = executor_->nworkers;
	unsigned long long curr_stamp;
	int scount = 1;
	for ( int i = (id + 1) % n_workers; scount < n_workers; i = (i + 1) % n_workers, scount++ ) {
		curr_stamp = executor_->w_infos[i].stamp;
		if ( curr_stamp < min ) {
			min = curr_stamp;
			min_id = i;
		}
	}
	choose_queue( this, min_id, swap_idx );
	#endif
}
---|

void main( worker & this ) with(this) {
	// #ifdef STATS
	// for ( i; executor_->nrqueues ) {
	// 	replaced_queue[i] = 0;
	// 	__atomic_store_n( &stolen_arr[i], 0, __ATOMIC_SEQ_CST );
	// }
	// #endif

	// threshold of empty queues seen before going stealing
	const unsigned int steal_threshold = 2 * range;

	// Store mutable data here instead of in the worker struct to avoid any potential false sharing
	unsigned int empty_count = 0;
	request & req;
	work_queue * curr_work_queue;

	Exit:
	for ( unsigned int i = 0;; i = (i + 1) % range ) { // cycle through the set of request buffers
		curr_work_queue = request_queues[i + start];

		// check if the queue is empty before trying to gulp it
		if ( isEmpty( *curr_work_queue->c_queue ) ) {
			#ifdef __STEAL
			empty_count++;
			if ( empty_count < steal_threshold ) continue;
			#else
			continue;
			#endif
		}
		transfer( *curr_work_queue, &current_queue );
		#ifdef STATS
		executor_->w_infos[id].gulps++;
		#endif // STATS
		#ifdef __STEAL
		if ( isEmpty( *current_queue ) ) {
			if ( unlikely( no_steal ) ) continue;
			empty_count++;
			if ( empty_count < steal_threshold ) continue;
			empty_count = 0;

			__atomic_store_n( &executor_->w_infos[id].stamp, rdtscl(), __ATOMIC_RELAXED );

			#ifdef STATS
			executor_->w_infos[id].try_steal++;
			#endif // STATS

			steal_work( this, start + prng( range ) );
			continue;
		}
		#endif // __STEAL
		while ( ! isEmpty( *current_queue ) ) {
			#ifdef STATS
			executor_->w_infos[id].processed++;
			#endif
			&req = &remove( *current_queue );
			if ( !&req ) continue;
			if ( req.stop ) break Exit;
			deliver_request( req );
		}
		#ifdef __STEAL
		curr_work_queue->being_processed = false; // done processing
		empty_count = 0; // we found work, so reset the empty counter
		#endif

		// potentially reclaim some of the current queue's vector space if it is unused
		reclaim( *current_queue );
	} // for
}
---|

static inline void send( executor & this, request & req, unsigned long int ticket ) with(this) {
	insert( request_queues[ticket], req );
}

static inline void send( actor & this, request & req ) {
	verifyf( this.ticket != (unsigned long int)MAX, "Attempted to send a message to a deleted/dead actor\n" );
	send( *__actor_executor_, req, this.ticket );
}
---|

static inline void __reset_stats() {
	#ifdef STATS
	__total_tries = 0;
	__total_stolen = 0;
	__all_gulps = 0;
	__total_failed_swaps = 0;
	__all_processed = 0;
	__num_actors_stats = 0;
	__all_msgs_stolen = 0;
	#endif
}

static inline void start_actor_system( size_t num_thds ) {
	__reset_stats();
	__actor_executor_thd = active_thread();
	__actor_executor_ = alloc();
	(*__actor_executor_){ 0, num_thds, num_thds == 1 ? 1 : num_thds * 16 };
}

// TODO: potentially revisit getting the number of processors
// (the value stored in active_cluster()->procs.total is often stale
// and does not reflect how many procs are allocated)
// static inline void start_actor_system() { start_actor_system( active_cluster()->procs.total ); }
static inline void start_actor_system() { start_actor_system( 1 ); }

static inline void start_actor_system( executor & this ) {
	__reset_stats();
	__actor_executor_thd = active_thread();
	__actor_executor_ = &this;
	__actor_executor_passed = true;
}

static inline void stop_actor_system() {
	park( ); // will receive a signal when the actor system is finished

	if ( !__actor_executor_passed ) delete( __actor_executor_ );
	__actor_executor_ = 0p;
	__actor_executor_thd = 0p;
	__next_ticket = 0;
	__actor_executor_passed = false;
}
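
// Lifecycle sketch of the three start variants (illustrative):
//   start_actor_system();       // 1 worker thread, 1 mailbox
//   start_actor_system( 4 );    // 4 worker threads, 64 mailboxes (num_thds * 16);
//                               // workers share the caller's cluster
//   executor e{ 4, 4, 16 };
//   start_actor_system( e );    // user-supplied executor (not deleted on stop)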
---|

// Default messages to send to any actor to change its allocation status.
// Assigned at creation to __base_msg_finished to avoid an unused-message warning.
message __base_msg_finished @= { .allocation_ : Finished };
struct __DeleteMsg { inline message; } DeleteMsg = __base_msg_finished;
struct __DestroyMsg { inline message; } DestroyMsg = __base_msg_finished;
struct __FinishedMsg { inline message; } FinishedMsg = __base_msg_finished;

Allocation receive( actor & this, __DeleteMsg & msg ) { return Delete; }
Allocation receive( actor & this, __DestroyMsg & msg ) { return Destroy; }
Allocation receive( actor & this, __FinishedMsg & msg ) { return Finished; }
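
// End-to-end usage sketch (hypothetical names; the request plumbing written by
// hand here is normally generated for user-defined actor/message types):
//   struct hello_actor { inline actor; };
//   struct hello_msg { inline message; };
//   Allocation receive( hello_actor & a, hello_msg & m ) {
//       sout | "hello";
//       return Finished;                         // this actor is done
//   }
//   Allocation hello_fn( actor & a, message & m ) { // trampoline matching __receive_fn
//       return receive( (hello_actor &)a, (hello_msg &)m );
//   }
//   int main() {
//       start_actor_system( 2 );                 // executor with 2 worker threads
//       hello_actor a;
//       hello_msg m;
//       request req{ &a, &m, hello_fn };
//       send( a, req );
//       stop_actor_system();                     // parks until all actors terminate
//   }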
---|