Changeset c60e5094
- Timestamp: Nov 19, 2021, 11:59:55 AM (3 years ago)
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum
- Children: 813dfd86, cd4c605
- Parents: 3e417bf (diff), a633f6f (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src/concurrency
- Files: 4 edited
libcfa/src/concurrency/kernel.cfa
r3e417bf → rc60e5094

        static bool mark_idle (__cluster_proc_list & idles, processor & proc);
        static void mark_awake(__cluster_proc_list & idles, processor & proc);
-       static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );

        extern void __cfa_io_start( processor * );
…
        // Check if there is a sleeping processor
-       processor * p;
-       unsigned idle;
-       unsigned total;
-       [idle, total, p] = query_idles(this->procs);
+       int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST);

        // If no one is sleeping, we are done
-       if( idle == 0 ) return;
+       if( fd == 0 ) return;

        // We found a processor, wake it up
        eventfd_t val;
        val = 1;
-       eventfd_write( p->idle, val );
+       eventfd_write( fd, val );

        #if !defined(__CFA_NO_STATISTICS__)
…
        remove(proc);
        insert_first(this.idles, proc);
+
+       __atomic_store_n(&this.fd, proc.idle, __ATOMIC_SEQ_CST);
        unlock( this );
        /* paranoid */ verify( ! __preemption_enabled() );
…
        remove(proc);
        insert_last(this.actives, proc);
+
+       {
+           int fd = 0;
+           if(!this.idles`isEmpty) fd = this.idles`first.idle;
+           __atomic_store_n(&this.fd, fd, __ATOMIC_SEQ_CST);
+       }
+
        unlock( this );
-       /* paranoid */ verify( ! __preemption_enabled() );
-   }
-
-   static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
-       /* paranoid */ verify( ! __preemption_enabled() );
-       /* paranoid */ verify( ready_schedule_islocked() );
-
-       for() {
-           uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
-           if( 1 == (l % 2) ) { Pause(); continue; }
-           unsigned idle = this.idle;
-           unsigned total = this.total;
-           processor * proc = &this.idles`first;
-           // Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
-           asm volatile("": : :"memory");
-           if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
-           return [idle, total, proc];
-       }
-
-       /* paranoid */ verify( ready_schedule_islocked() );
        /* paranoid */ verify( ! __preemption_enabled() );
    }
…
        if(head == tail) return false;
        #if OLD_MAIN
-       ready_schedule_lock();
-       ret = __cfa_io_drain( proc );
-       ready_schedule_unlock();
+           ready_schedule_lock();
+           ret = __cfa_io_drain( proc );
+           ready_schedule_unlock();
        #else
        ret = __cfa_io_drain( proc );
-       #endif
+           #endif
        #endif
        return ret;
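In short, the kernel.cfa change drops the tuple-returning query_idles scan: __wake_one now does a single atomic load of procs.fd and, if the value is non-zero, posts to that eventfd, while mark_idle/mark_awake keep the cached fd up to date. The following stand-alone C sketch illustrates the same publish-and-post pattern; it is an illustration only, not the CFA runtime code, and the names cluster_t, sleeper_park, and waker_wake_one are made up for the example.

    #include <stdio.h>
    #include <stdatomic.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    typedef struct {
        _Atomic int idle_fd;   // 0 means "no processor is sleeping"
    } cluster_t;

    // Idle path: a sleeper publishes its eventfd, then blocks on it
    // (mirrors mark_idle storing proc.idle into this.fd).
    void sleeper_park(cluster_t * cl, int my_efd) {
        atomic_store_explicit(&cl->idle_fd, my_efd, memory_order_seq_cst);
        eventfd_t val;
        eventfd_read(my_efd, &val);            // blocks until a waker writes
        atomic_store_explicit(&cl->idle_fd, 0, memory_order_seq_cst);
    }

    // Wake path: a single atomic load replaces the old query_idles scan;
    // writing to the eventfd unblocks the sleeper.
    void waker_wake_one(cluster_t * cl) {
        int fd = atomic_load_explicit(&cl->idle_fd, memory_order_seq_cst);
        if (fd == 0) return;                   // no one is sleeping, nothing to do
        eventfd_write(fd, 1);
    }

    int main(void) {
        cluster_t cl = { 0 };
        int efd = eventfd(0, 0);
        // In the real runtime the sleeper and waker run on different processors;
        // here the calls are sequenced in one thread just to show the mechanism.
        waker_wake_one(&cl);                   // no-op: idle_fd is still 0
        atomic_store(&cl.idle_fd, efd);
        waker_wake_one(&cl);                   // posts to the eventfd
        eventfd_t val;
        eventfd_read(efd, &val);
        printf("woken with value %llu\n", (unsigned long long)val);
        close(efd);
        return 0;
    }

The design point of the changeset is that the waker no longer needs to walk the idle list under the list lock; it only reads one published fd, and a value of 0 doubles as "nobody is sleeping".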
libcfa/src/concurrency/kernel.hfa
r3e417bf → rc60e5094

        struct __cluster_proc_list {
            // Spin lock protecting the queue
-           volatile uint64_t lock;
+           __spinlock_t lock;
+
+           // FD to use to wake a processor
+           volatile int fd;

            // Total number of processors
libcfa/src/concurrency/kernel/startup.cfa
r3e417bf → rc60e5094

        // Cluster
        static void ?{}(__cluster_proc_list & this) {
-           this.lock = 0;
+           this.fd = 0;
            this.idle = 0;
            this.total = 0;
libcfa/src/concurrency/kernel_private.hfa
r3e417bf → rc60e5094

        ready_schedule_lock();

-       // Simple counting lock, acquired, acquired by incrementing the counter
-       // to an odd number
-       for() {
-           uint64_t l = this.lock;
-           if(
-               (0 == (l % 2))
-               && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-           ) return;
-           Pause();
-       }
+       lock( this.lock __cfaabi_dbg_ctx2 );

        /* paranoid */ verify( ! __preemption_enabled() );
…
        ready_schedule_lock();

-       // Simple counting lock, acquired, acquired by incrementing the counter
-       // to an odd number
-       uint64_t l = this.lock;
-       if(
-           (0 == (l % 2))
-           && __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-       ) {
+       if(try_lock( this.lock __cfaabi_dbg_ctx2 )) {
            // success
            /* paranoid */ verify( ! __preemption_enabled() );
…
        /* paranoid */ verify( ! __preemption_enabled() );

-       /* paranoid */ verify( 1 == (this.lock % 2) );
-       // Simple couting lock, release by incrementing to an even number
-       __atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
+       unlock(this.lock);

        // Release the global lock, which we acquired when locking
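The lock being removed here was a hand-rolled counting lock: the counter is even when free and odd while held, acquire CASes an even value to the next odd one, and release increments it back to an even value. For reference, a minimal C sketch of that removed pattern is below; it uses standard C11 atomics rather than the runtime's __atomic builtins, and the names counting_lock, counting_lock_acquire, counting_lock_try, and counting_lock_release are invented for the illustration.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdatomic.h>
    #include <sched.h>

    typedef struct { _Atomic uint64_t ctr; } counting_lock;

    // Acquire: spin until the counter is even, then CAS it to the next odd value.
    static void counting_lock_acquire(counting_lock * l) {
        for (;;) {
            uint64_t v = atomic_load(&l->ctr);
            if ((v % 2) == 0 &&
                atomic_compare_exchange_weak(&l->ctr, &v, v + 1))
                return;                 // even -> odd: lock acquired
            sched_yield();              // stand-in for the runtime's Pause()
        }
    }

    // Try-acquire: one CAS attempt, as in the old ready_mutate_islocked fast path.
    static bool counting_lock_try(counting_lock * l) {
        uint64_t v = atomic_load(&l->ctr);
        return (v % 2) == 0 &&
               atomic_compare_exchange_strong(&l->ctr, &v, v + 1);
    }

    // Release: odd -> even; the counter also serves as a version number,
    // which is what query_idles used to detect concurrent mutation.
    static void counting_lock_release(counting_lock * l) {
        atomic_fetch_add(&l->ctr, 1);
    }

    int main(void) {
        counting_lock l = { 0 };
        counting_lock_acquire(&l);
        bool second = counting_lock_try(&l);   // fails: counter is odd (held)
        counting_lock_release(&l);
        return second ? 1 : 0;
    }

With query_idles gone (kernel.cfa above), the version-number property of the counter is no longer needed, so the changeset replaces it with the runtime's existing __spinlock_t, gaining the lock()/try_lock()/unlock() interface and the __cfaabi_dbg_ctx2 debug context for free.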