Changeset 6a9b12b
- Timestamp: Apr 14, 2021, 4:28:55 PM (4 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: fc59b580
- Parents: a7504db5
- Location: libcfa/src/concurrency
- Files: 4 edited
libcfa/src/concurrency/kernel.cfa
--- libcfa/src/concurrency/kernel.cfa (a7504db5)
+++ libcfa/src/concurrency/kernel.cfa (6a9b12b)
@@ -113,7 +113,7 @@
 static void __wake_one(cluster * cltr);
 
-static void push  (__cluster_idles & idles, processor & proc);
-static void remove(__cluster_idles & idles, processor & proc);
-static [unsigned idle, unsigned total, * processor] query( & __cluster_idles idles );
+static void mark_idle (__cluster_proc_list & idles, processor & proc);
+static void mark_awake(__cluster_proc_list & idles, processor & proc);
+static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );
 
 extern void __cfa_io_start( processor * );
@@ -189,5 +189,5 @@
 
 	// Push self to idle stack
-	push(this->cltr->idles, * this);
+	mark_idle(this->cltr->procs, * this);
 
 	// Confirm the ready-queue is empty
@@ -195,4 +195,4 @@
 	if( readyThread ) {
 		// A thread was found, cancel the halt
-		remove(this->cltr->idles, * this);
+		mark_awake(this->cltr->procs, * this);
 
@@ -225,5 +225,5 @@
 
 	// We were woken up, remove self from idle
-	remove(this->cltr->idles, * this);
+	mark_awake(this->cltr->procs, * this);
 
 	// DON'T just proceed, start looking again
@@ -617,5 +617,5 @@
 	unsigned idle;
 	unsigned total;
-	[idle, total, p] = query(this->idles);
+	[idle, total, p] = query_idles(this->procs);
 
 	// If no one is sleeping, we are done
@@ -654,5 +654,5 @@
 }
 
-static void push (__cluster_idles & this, processor & proc) {
+static void mark_idle(__cluster_proc_list & this, processor & proc) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	lock( this );
@@ -660,10 +660,10 @@
 	/* paranoid */ verify( this.idle <= this.total );
 
-	insert_first(this.list, proc);
+	insert_first(this.idles, proc);
 	unlock( this );
 	/* paranoid */ verify( ! __preemption_enabled() );
 }
 
-static void remove(__cluster_idles & this, processor & proc) {
+static void mark_awake(__cluster_proc_list & this, processor & proc) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	lock( this );
@@ -676,5 +676,8 @@
 }
 
-static [unsigned idle, unsigned total, * processor] query( & __cluster_idles this ) {
+static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( ready_schedule_islocked() );
+
 	for() {
 		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
@@ -682,5 +685,5 @@
 		unsigned idle = this.idle;
 		unsigned total = this.total;
-		processor * proc = &this.list`first;
+		processor * proc = &this.idles`first;
 		// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
 		asm volatile("": : :"memory");
@@ -688,4 +691,7 @@
 		return [idle, total, proc];
 	}
+
+	/* paranoid */ verify( ready_schedule_islocked() );
+	/* paranoid */ verify( ! __preemption_enabled() );
 }
 
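Taken together, these hunks rename the idle-processor bookkeeping: __cluster_idles becomes __cluster_proc_list, push/remove become mark_idle/mark_awake, query becomes query_idles, and the cluster field moves from idles to procs. The retry loop in query_idles is a sequence-lock style read: sample the lock word, read the fields, then confirm no writer intervened (the validation lines are elided in the hunks above). Below is a minimal plain-C sketch of that read pattern, assuming an odd-while-writing counter convention; the struct and helper names are illustrative, not the libcfa API:

    #include <stdatomic.h>
    #include <stdint.h>

    struct proc_list {
        _Atomic uint64_t lock;  // assumed convention: odd while a writer is active
        unsigned idle;          // processors currently parked
        unsigned total;         // processors registered with the cluster
    };

    // Seqlock-style read: retry until a consistent snapshot is observed.
    static void snapshot(struct proc_list * this, unsigned * idle, unsigned * total) {
        for (;;) {
            uint64_t l = atomic_load_explicit(&this->lock, memory_order_seq_cst);
            if (l & 1) continue;                 // writer in progress, retry
            *idle  = this->idle;
            *total = this->total;
            // Like the asm volatile compiler fence above: keep the field reads
            // from being reordered past the re-check on older compilers.
            atomic_signal_fence(memory_order_seq_cst);
            if (atomic_load_explicit(&this->lock, memory_order_seq_cst) == l)
                return;                          // no writer intervened
        }
    }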
libcfa/src/concurrency/kernel.hfa
--- libcfa/src/concurrency/kernel.hfa (a7504db5)
+++ libcfa/src/concurrency/kernel.hfa (6a9b12b)
@@ -180,5 +180,5 @@
 
 // Idle Sleep
-struct __cluster_idles {
+struct __cluster_proc_list {
 	// Spin lock protecting the queue
 	volatile uint64_t lock;
@@ -191,5 +191,5 @@
 
 	// List of idle processors
-	dlist(processor, processor) list;
+	dlist(processor, processor) idles;
 };
 
@@ -207,5 +207,5 @@
 
 	// List of idle processors
-	__cluster_idles idles;
+	__cluster_proc_list procs;
 
 	// List of threads
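The renamed list member is an intrusive dlist: each processor embeds its own links, so parking and unparking never allocate. As a rough plain-C sketch of what dlist(processor, processor) provides (the field and function names below are illustrative assumptions, not libcfa's generated API):

    #include <stddef.h>

    struct processor {
        struct processor * next;  // intrusive links embedded in the element itself
        struct processor * prev;
    };

    struct proc_list {
        struct processor * head;  // first (most recently parked) idle processor
    };

    // Equivalent of insert_first(this.idles, proc): push onto the front.
    static void insert_first(struct proc_list * l, struct processor * p) {
        p->prev = NULL;
        p->next = l->head;
        if (l->head) l->head->prev = p;
        l->head = p;
    }

    // Unlink an arbitrary element in O(1), no traversal needed.
    static void remove_proc(struct proc_list * l, struct processor * p) {
        if (p->prev) p->prev->next = p->next; else l->head = p->next;
        if (p->next) p->next->prev = p->prev;
        p->next = p->prev = NULL;
    }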
libcfa/src/concurrency/kernel/startup.cfa
--- libcfa/src/concurrency/kernel/startup.cfa (a7504db5)
+++ libcfa/src/concurrency/kernel/startup.cfa (6a9b12b)
@@ -491,5 +491,5 @@
 	// Register and Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_register((__processor_id_t*)&this);
-	int target = this.cltr->idles.total += 1u;
+	int target = this.cltr->procs.total += 1u;
 
 	// Adjust the ready queue size
@@ -506,5 +506,5 @@
 	// Lock the RWlock so no-one pushes/pops while we are changing the queue
 	uint_fast32_t last_size = ready_mutate_lock();
-	int target = this.cltr->idles.total -= 1u;
+	int target = this.cltr->procs.total -= 1u;
 
 	// Adjust the ready queue size
@@ -555,8 +555,7 @@
 //-----------------------------------------------------------------------------
 // Cluster
-static void ?{}(__cluster_idles & this) {
+static void ?{}(__cluster_proc_list & this) {
 	this.lock = 0;
 	this.idle = 0;
 	this.total = 0;
-	(this.list){};
 }
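Two things stand out here. First, the constructor drops the explicit (this.list){} call, presumably because the renamed idles member is now default-constructed. Second, the registration hunks show the discipline around total: the count only changes while the ready-queue RW lock is held for mutation, so schedulers never observe a processor count out of sync with the queue size. A self-contained sketch of that discipline, using a pthread mutex as a stand-in for libcfa's ready_mutate lock (names below are illustrative):

    #include <pthread.h>

    struct cluster {
        pthread_mutex_t ready_lock;  // stand-in for the ready-queue RW lock
        unsigned total;              // processors registered with the cluster
    };

    // Register: bump the count and resize the queue in one critical section.
    static unsigned register_proc(struct cluster * cltr) {
        pthread_mutex_lock(&cltr->ready_lock);
        unsigned target = ++cltr->total;
        /* ... grow the ready queue to 'target' lanes ... */
        pthread_mutex_unlock(&cltr->ready_lock);
        return target;
    }

    // Unregister: the mirror image, shrinking instead of growing.
    static unsigned unregister_proc(struct cluster * cltr) {
        pthread_mutex_lock(&cltr->ready_lock);
        unsigned target = --cltr->total;
        /* ... shrink the ready queue to 'target' lanes ... */
        pthread_mutex_unlock(&cltr->ready_lock);
        return target;
    }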
libcfa/src/concurrency/kernel_private.hfa
--- libcfa/src/concurrency/kernel_private.hfa (a7504db5)
+++ libcfa/src/concurrency/kernel_private.hfa (6a9b12b)
@@ -247,5 +247,5 @@
 //-----------------------------------------------------------------------
 // Cluster idle lock/unlock
-static inline void lock(__cluster_idles & this) {
+static inline void lock(__cluster_proc_list & this) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 
@@ -268,5 +268,5 @@
 }
 
-static inline void unlock(__cluster_idles & this) {
+static inline void unlock(__cluster_proc_list & this) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 
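The lock/unlock bodies are elided in this hunk, but the volatile uint64_t lock word that query_idles samples suggests a sequence-lock discipline on the writer side. A speculative plain-C sketch of such a writer, pairing with the reader sketch earlier (this is an assumed protocol, not the actual libcfa bodies):

    #include <stdatomic.h>
    #include <stdint.h>

    struct proc_list { _Atomic uint64_t lock; };

    // Assumed writer protocol: make the counter odd on entry and even on exit;
    // readers retry while the value is odd or changes across their reads.
    static void proc_list_lock(struct proc_list * this) {
        for (;;) {
            uint64_t l = atomic_load_explicit(&this->lock, memory_order_relaxed);
            if ((l & 1) == 0 &&
                atomic_compare_exchange_weak(&this->lock, &l, l + 1))
                return;  // acquired: counter is now odd
        }
    }

    static void proc_list_unlock(struct proc_list * this) {
        atomic_fetch_add(&this->lock, 1);  // counter back to even: lock released
    }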