Changes in / [84a6e70:b14ec5f]
Location: libcfa/src/concurrency
Files: 3 edited
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
libcfa/src/concurrency/kernel.cfa
r84a6e70 → rb14ec5f

  static bool mark_idle (__cluster_proc_list & idles, processor & proc);
  static void mark_awake(__cluster_proc_list & idles, processor & proc);
- static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

  extern void __cfa_io_start( processor * );
…
  // Check if there is a sleeping processor
- processor * p;
- unsigned idle;
- unsigned total;
- [idle, total, p] = query_idles(this->procs);
+ int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST);

  // If no one is sleeping, we are done
- if( idle == 0 ) return;
+ if( fd == 0 ) return;

  // We found a processor, wake it up
  eventfd_t val;
  val = 1;
- eventfd_write( p->idle, val );
+ eventfd_write( fd, val );

  #if !defined(__CFA_NO_STATISTICS__)
…
  remove(proc);
  insert_first(this.idles, proc);
+
+ __atomic_store_n(&this.fd, proc.idle, __ATOMIC_SEQ_CST);
  unlock( this );
  /* paranoid */ verify( ! __preemption_enabled() );
…
  remove(proc);
  insert_last(this.actives, proc);
+
+ __atomic_store_n(&this.fd, this.idles`first.idle, __ATOMIC_SEQ_CST);
  unlock( this );
- /* paranoid */ verify( ! __preemption_enabled() );
- }
-
- static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
- 	/* paranoid */ verify( ! __preemption_enabled() );
- 	/* paranoid */ verify( ready_schedule_islocked() );
-
- 	for() {
- 		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
- 		if( 1 == (l % 2) ) { Pause(); continue; }
- 		unsigned idle = this.idle;
- 		unsigned total = this.total;
- 		processor * proc = &this.idles`first;
- 		// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
- 		asm volatile("": : :"memory");
- 		if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
- 		return [idle, total, proc];
- 	}
-
- 	/* paranoid */ verify( ready_schedule_islocked() );
  /* paranoid */ verify( ! __preemption_enabled() );
  }
…
  if(head == tail) return false;
  #if OLD_MAIN
- ready_schedule_lock();
- ret = __cfa_io_drain( proc );
- ready_schedule_unlock();
+ 	ready_schedule_lock();
+ 	ret = __cfa_io_drain( proc );
+ 	ready_schedule_unlock();
  #else
  ret = __cfa_io_drain( proc );
- #endif
+ 	#endif
  #endif
  return ret;
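The net effect of the kernel.cfa hunks above is that waking a processor no longer snapshots the idle list through query_idles(): a processor going idle publishes its eventfd in the cluster's fd field, and a waker performs one atomic load of that field followed by an eventfd_write. Below is a minimal standalone sketch of that wakeup pattern in plain C, assuming Linux eventfds and the GCC atomic builtins; the names idle_fd and wake_one are illustrative only, not part of the CFA runtime.

#include <sys/eventfd.h>
#include <stdio.h>

static volatile int idle_fd;  /* 0 means "no processor is sleeping" */

static void wake_one(void) {
	int fd = __atomic_load_n(&idle_fd, __ATOMIC_SEQ_CST);
	if (fd == 0) return;               /* no one is sleeping, we are done */
	eventfd_write(fd, (eventfd_t)1);   /* unblocks the sleeper's eventfd_read */
}

int main(void) {
	int fd = eventfd(0, 0);            /* the fd a sleeping processor waits on */
	__atomic_store_n(&idle_fd, fd, __ATOMIC_SEQ_CST);  /* published on sleep */

	wake_one();                        /* normally done by another thread */

	eventfd_t val;
	eventfd_read(fd, &val);            /* returns immediately: counter is 1 */
	printf("woken, counter was %llu\n", (unsigned long long)val);
	return 0;
}

Note that the single atomic load tolerates a benign race: the loaded fd may already be stale by the time it is written to, so the scheme presumably relies on processor eventfds staying open and on a spurious wakeup being harmless.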
libcfa/src/concurrency/kernel.hfa
r84a6e70 → rb14ec5f

  struct __cluster_proc_list {
  	// Spin lock protecting the queue
- 	volatile uint64_t lock;
+ 	__spinlock_t lock;
+
+ 	// FD to use to wake a processor
+ 	volatile int fd;

  	// Total number of processors
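For context on the removed volatile uint64_t lock: the deleted query_idles() in kernel.cfa was a classic seqlock read loop, where the counter is odd while a writer is active and a reader retries until it observes the same even value before and after copying the fields. A minimal plain-C sketch of that read side, assuming the GCC atomic builtins (proc_list and read_counts are illustrative names, not the CFA types):

#include <stdint.h>

struct proc_list {
	volatile uint64_t lock;  /* sequence counter, odd while a writer holds it */
	unsigned idle, total;
};

static void read_counts(struct proc_list * pl, unsigned * idle, unsigned * total) {
	for (;;) {
		uint64_t l = __atomic_load_n(&pl->lock, __ATOMIC_SEQ_CST);
		if (l % 2 == 1) continue;          /* writer in progress, retry */
		*idle  = pl->idle;
		*total = pl->total;
		asm volatile("" ::: "memory");     /* compiler fence, as in the original */
		if (l == __atomic_load_n(&pl->lock, __ATOMIC_SEQ_CST))
			return;                        /* same sequence: consistent snapshot */
	}
}

The changeset trades this lock-free snapshot for a conventional __spinlock_t guarding list mutation plus the separately published fd, so the hot wakeup path needs only the single atomic load shown in kernel.cfa.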
libcfa/src/concurrency/kernel/startup.cfa
r84a6e70 → rb14ec5f

  // Cluster
  static void ?{}(__cluster_proc_list & this) {
- 	this.lock = 0;
+ 	this.fd = 0;
  	this.idle = 0;
  	this.total = 0;