Changeset dbe2533 for libcfa/src
- Timestamp: Mar 21, 2022, 1:40:35 PM
- Branches: ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum
- Children: d672350
- Parents: b39e961b (diff), 4ecc35a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src/concurrency
- Files: 7 edited
libcfa/src/concurrency/io.cfa
b39e961b → dbe2533

  extern void __kernel_unpark( thread$ * thrd, unpark_hint );

- bool __cfa_io_drain( processor * proc) {
+ bool __cfa_io_drain( $io_context * ctx ) {
      /* paranoid */ verify( ! __preemption_enabled() );
      /* paranoid */ verify( ready_schedule_islocked() );
-     /* paranoid */ verify( proc );
-     /* paranoid */ verify( proc->io.ctx );
+     /* paranoid */ verify( ctx );

      // Drain the queue
-     $io_context * ctx = proc->io.ctx;
      unsigned head = *ctx->cq.head;
      unsigned tail = *ctx->cq.tail;
      …
      if(count == 0) return false;

+     if(!__atomic_try_acquire(&ctx->cq.lock)) {
+         return false;
+     }
+
      for(i; count) {
          unsigned idx = (head + i) & mask;
      …
      /* paranoid */ verify( ready_schedule_islocked() );
      /* paranoid */ verify( ! __preemption_enabled() );
+
+     __atomic_unlock(&ctx->cq.lock);

      return true;
      …

      ready_schedule_lock();
-     bool ret = __cfa_io_drain( proc);
+     bool ret = __cfa_io_drain( &ctx );
      ready_schedule_unlock();
      return ret;
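The substance of this change: __cfa_io_drain now takes the $io_context directly instead of reaching through a processor, and the drain of the completion queue is guarded by a try-lock, so a processor that loses the race reports "nothing drained" and moves on rather than spinning. A minimal plain-C sketch of that back-off pattern, assuming a power-of-two ring; the names here (cq_ring, drain_one) are hypothetical stand-ins, not the CFA runtime's API:

    #include <stdbool.h>
    #include <stdatomic.h>

    struct cq_ring {
        atomic_flag lock;        // drain guard; initialize with ATOMIC_FLAG_INIT
        volatile unsigned *head; // consumer index, advanced by the drainer
        volatile unsigned *tail; // producer index, advanced by the kernel
        unsigned mask;           // capacity - 1, capacity a power of two
    };

    // Drain visible completions; returns false if the ring was empty
    // or another thread already holds the drain lock.
    static bool drain_one(struct cq_ring *cq) {
        unsigned head = *cq->head;
        unsigned tail = *cq->tail;
        if (tail == head) return false;            // nothing to drain

        // Try-lock: losing the race is fine, the winner drains for everyone.
        if (atomic_flag_test_and_set_explicit(&cq->lock, memory_order_acquire))
            return false;

        for (unsigned i = head; i != tail; i++) {
            unsigned idx = i & cq->mask;
            /* ... handle the completion entry at idx ... */
            (void)idx;
        }
        *cq->head = tail;                          // publish consumed entries

        atomic_flag_clear_explicit(&cq->lock, memory_order_release);
        return true;
    }

Checking head against tail before attempting the lock mirrors the order in the patch: the common empty case costs two loads and no atomic traffic.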
libcfa/src/concurrency/io/setup.cfa
b39e961b → dbe2533

      // completion queue
+     cq.lock = 0;
      cq.head = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.head);
      cq.tail = (volatile __u32 *)(((intptr_t)cq.ring_ptr) + params.cq_off.tail);
libcfa/src/concurrency/io/types.hfa
b39e961b → dbe2533

  struct __cmp_ring_t {
+     volatile bool lock;
+
      // Head and tail of the ring
      volatile __u32 * head;
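This lock field is the byte the try-lock in io.cfa takes. __atomic_try_acquire and __atomic_unlock are the runtime's own helpers; a rough guess at their behaviour over a volatile bool, phrased with GCC's __atomic builtins and not to be read as the actual CFA implementation:

    // Hedged sketch: a try-acquire/unlock pair over a volatile bool.
    static inline _Bool try_acquire(volatile _Bool *lock) {
        if (*lock) return 0;  // fast path: lock is visibly held, do not even try
        return !__atomic_test_and_set(lock, __ATOMIC_ACQUIRE);
    }

    static inline void unlock(volatile _Bool *lock) {
        __atomic_clear(lock, __ATOMIC_RELEASE);  // store false with release order
    }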
libcfa/src/concurrency/kernel.cfa
b39e961b → dbe2533

  static void mark_awake(__cluster_proc_list & idles, processor & proc);

- extern bool __cfa_io_drain( processor* );
+ extern bool __cfa_io_drain( $io_context * );
  extern bool __cfa_io_flush( processor *, int min_comp );
  static inline bool __maybe_io_drain( processor * );
  …
      if(head == tail) return false;
      ready_schedule_lock();
-     ret = __cfa_io_drain( proc);
+     ret = __cfa_io_drain( ctx );
      ready_schedule_unlock();
  #endif
libcfa/src/concurrency/kernel.hfa
b39e961b → dbe2533

  struct {
      $io_context * ctx;
+     unsigned id;
+     unsigned target;
      volatile bool pending;
      volatile bool dirty;
  …
  struct {
      // Array of subqueues
-     __intrusive_lane_t * volatile data;
+     __intrusive_lane_t * data;

      // Time since subqueues were processed
-     __timestamp_t * volatile tscs;
+     __timestamp_t * tscs;

      // Number of subqueue / timestamps
  …
  struct {
+     // Array of $io_
+     $io_context ** data;
+
      // Time since subqueues were processed
-     __timestamp_t * volatile tscs;
+     __timestamp_t * tscs;

      // Number of I/O subqueues
  …
      // Cache each kernel thread belongs to
-     __cache_id_t * volatile caches;
+     __cache_id_t * caches;
  } sched;
libcfa/src/concurrency/kernel/cluster.cfa
b39e961b → dbe2533

  }

- static void assign_list(unsigned & value, dlist(processor) & list, unsigned count) {
+ static void assign_list(unsigned & valrq, unsigned & valio, dlist(processor) & list, unsigned count) {
      processor * it = &list`first;
      for(unsigned i = 0; i < count; i++) {
          /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
-         it->rdq.id = value;
+         it->rdq.id = valrq;
          it->rdq.target = MAX;
-         value += __shard_factor.readyq;
+         it->io.id = valio;
+         it->io.target = MAX;
+         valrq += __shard_factor.readyq;
+         valio += __shard_factor.io;
          it = &(*it)`next;
      }
  …

  static void reassign_cltr_id(struct cluster * cltr) {
-     unsigned preferred = 0;
-     assign_list(preferred, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
-     assign_list(preferred, cltr->procs.idles , cltr->procs.idle );
+     unsigned prefrq = 0;
+     unsigned prefio = 0;
+     assign_list(prefrq, prefio, cltr->procs.actives, cltr->procs.total - cltr->procs.idle);
+     assign_list(prefrq, prefio, cltr->procs.idles , cltr->procs.idle );
+ }
+
+ static void assign_io($io_context ** data, size_t count, dlist(processor) & list) {
+     processor * it = &list`first;
+     while(it) {
+         /* paranoid */ verifyf( it, "Unexpected null iterator\n");
+         /* paranoid */ verifyf( it->io.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count);
+         data[it->io.id] = it->io.ctx;
+         it = &(*it)`next;
+     }
+ }
+
+ static void reassign_cltr_io(struct cluster * cltr) {
+     assign_io(cltr->sched.io.data, cltr->sched.io.count, cltr->procs.actives);
+     assign_io(cltr->sched.io.data, cltr->sched.io.count, cltr->procs.idles );
  }
  …

      // Fix the io times
-     cltr->sched.io.count = target;
+     cltr->sched.io.count = target * __shard_factor.io;
      fix_times(cltr->sched.io.tscs, cltr->sched.io.count);
  …
      // reassign the clusters.
      reassign_cltr_id(cltr);
+
+     cltr->sched.io.data = alloc( cltr->sched.io.count, cltr->sched.io.data`realloc );
+     reassign_cltr_io(cltr);

      // Make sure that everything is consistent
  …

      // Fix the io times
-     cltr->sched.io.count = target;
+     cltr->sched.io.count = target * __shard_factor.io;
      fix_times(cltr->sched.io.tscs, cltr->sched.io.count);

      reassign_cltr_id(cltr);
+
+     cltr->sched.io.data = alloc( cltr->sched.io.count, cltr->sched.io.data`realloc );
+     reassign_cltr_io(cltr);

      // Make sure that everything is consistent
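Taken together: each processor now owns a slice of the ready queue and a slice of the io-context array, with ids spaced by the matching shard factor, and reassign_cltr_io publishes every processor's $io_context into one flat cluster-wide array so that any processor can locate, and help drain, any context. A plain-C sketch of the indexing arithmetic; proc, assign_ids, and publish_io are hypothetical names:

    #include <assert.h>
    #include <stddef.h>

    enum { SHARD_READYQ = 2, SHARD_IO = 1 };  // mirrors __shard_factor = { 2, 1 }

    struct proc { unsigned rdq_id, io_id; void *io_ctx; };

    // Hand out shard ids: processor i owns readyq slots [2i, 2i+1] and io
    // slot [i], so the per-processor ranges never overlap.
    static void assign_ids(struct proc *procs, size_t nprocs) {
        unsigned valrq = 0, valio = 0;
        for (size_t i = 0; i < nprocs; i++) {
            procs[i].rdq_id = valrq;  valrq += SHARD_READYQ;
            procs[i].io_id  = valio;  valio += SHARD_IO;
        }
    }

    // Publish each processor's io context into the flat cluster array,
    // indexed by its io shard id.
    static void publish_io(void **data, size_t count, struct proc *procs, size_t nprocs) {
        for (size_t i = 0; i < nprocs; i++) {
            assert(procs[i].io_id < count);
            data[procs[i].io_id] = procs[i].io_ctx;
        }
    }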
libcfa/src/concurrency/kernel/cluster.hfa
b39e961b → dbe2533

  static struct {
      const unsigned readyq;
- } __shard_factor = { 2 };
+     const unsigned io;
+ } __shard_factor = { 2, 1 };

  // Local Variables: //
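Read together with the cltr->sched.io.count = target * __shard_factor.io sizing in cluster.cfa above: on, say, a 4-processor cluster the ready queue is sharded into 4 × 2 = 8 subqueues, while the io array gets 4 × 1 = 4 slots, one $io_context per processor until the io shard factor is raised above 1.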