Changeset 7770cc8 for libcfa/src/concurrency
- Timestamp: Nov 24, 2021, 9:47:56 PM
- Branches: ADT, ast-experimental, enum, master, pthread-emulation, qualifiedEnum, stuck-waitfor-destruct
- Children: 5235d49
- Parents: 94647b0b (diff), 3cc1111 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: libcfa/src/concurrency
- Files: 9 edited
  - io.cfa (modified) (7 diffs)
  - io/setup.cfa (modified) (3 diffs)
  - io/types.hfa (modified) (1 diff)
  - kernel.cfa (modified) (17 diffs)
  - kernel.hfa (modified) (2 diffs)
  - kernel/startup.cfa (modified) (5 diffs)
  - kernel_private.hfa (modified) (4 diffs)
  - monitor.hfa (modified) (1 diff)
  - mutex_stmt.hfa (modified) (1 diff)
Legend: in the diffs below, unchanged context lines are prefixed with a space, removed lines with -, added lines with +, and a lone … marks lines elided between hunks.
libcfa/src/concurrency/io.cfa
r94647b0b → r7770cc8

 	}
 
-void __cfa_io_flush( processor * proc ) {
+bool __cfa_io_flush( processor * proc, bool wait ) {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	/* paranoid */ verify( proc );
…
 	$io_context & ctx = *proc->io.ctx;
 
-	// for(i; 2) {
-	// 	unsigned idx = proc->rdq.id + i;
-	// 	cltr->ready_queue.lanes.tscs[idx].tv = -1ull;
-	// }
-
 	__ioarbiter_flush( ctx );
 
 	__STATS__( true, io.calls.flush++; )
-	int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, 0, 0, (sigset_t *)0p, _NSIG / 8);
+	int ret = syscall( __NR_io_uring_enter, ctx.fd, ctx.sq.to_submit, wait ? 1 : 0, 0, (sigset_t *)0p, _NSIG / 8);
 	if( ret < 0 ) {
 		switch((int)errno) {
…
 			// Update statistics
 			__STATS__( false, io.calls.errors.busy ++; )
-			// for(i; 2) {
-			// 	unsigned idx = proc->rdq.id + i;
-			// 	cltr->ready_queue.lanes.tscs[idx].tv = rdtscl();
-			// }
-			return;
+			return false;
 		default:
 			abort( "KERNEL ERROR: IO_URING SYSCALL - (%d) %s\n", (int)errno, strerror(errno) );
…
 
 	ctx.proc->io.pending = false;
-
 	ready_schedule_lock();
-	__cfa_io_drain( proc );
+	bool ret = __cfa_io_drain( proc );
 	ready_schedule_unlock();
-	// for(i; 2) {
-	// 	unsigned idx = proc->rdq.id + i;
-	// 	cltr->ready_queue.lanes.tscs[idx].tv = rdtscl();
-	// }
+	return ret;
 }
…
 	}
 
-
 //=============================================================================================
 // submission
…
 	// Make the sqes visible to the submitter
 	__atomic_store_n(sq.kring.tail, tail + have, __ATOMIC_RELEASE);
-	sq.to_submit ++;
+	sq.to_submit += have;
 
 	ctx->proc->io.pending = true;
 	ctx->proc->io.dirty   = true;
 	if(sq.to_submit > 30 || !lazy) {
-		__cfa_io_flush( ctx->proc );
+		__cfa_io_flush( ctx->proc, false );
 	}
 }
…
 		}
 	}
+
+	#if defined(IO_URING_IDLE)
+		bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd) {
+			$io_context * ctx = proc->io.ctx;
+			/* paranoid */ verify( ! __preemption_enabled() );
+			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
+			/* paranoid */ verify( ctx );
+
+			__u32 idx;
+			struct io_uring_sqe * sqe;
+
+			// We can proceed to the fast path
+			if( !__alloc(ctx, &idx, 1) ) return false;
+
+			// Allocation was successful
+			__fill( &sqe, 1, &idx, ctx );
+
+			sqe->opcode = IORING_OP_READ;
+			sqe->user_data = (uintptr_t)&future;
+			sqe->flags = 0;
+			sqe->ioprio = 0;
+			sqe->fd = 0;
+			sqe->off = 0;
+			sqe->fsync_flags = 0;
+			sqe->__pad2[0] = 0;
+			sqe->__pad2[1] = 0;
+			sqe->__pad2[2] = 0;
+			sqe->addr = (uintptr_t)buf;
+			sqe->len = sizeof(uint64_t);
+
+			asm volatile("": : :"memory");
+
+			/* paranoid */ verify( sqe->user_data == (uintptr_t)&future );
+			__submit( ctx, &idx, 1, true );
+
+			/* paranoid */ verify( proc == __cfaabi_tls.this_processor );
+			/* paranoid */ verify( ! __preemption_enabled() );
+		}
+	#endif
 #endif
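The new wait parameter is what lets the idle path block inside the ring rather than returning immediately. The following is a rough standalone sketch of that idea in plain C, not the Cforall runtime code itself: the helper name flush_ring is illustrative, and it passes IORING_ENTER_GETEVENTS when waiting, since the raw io_uring_enter syscall only honours min_complete together with that flag.

    #define _GNU_SOURCE
    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <signal.h>

    // Submit whatever is pending; when `wait` is set, block until at least
    // one completion has been posted to the completion queue.
    static long flush_ring(int ring_fd, unsigned to_submit, int wait) {
        unsigned min_complete = wait ? 1 : 0;
        unsigned flags        = wait ? IORING_ENTER_GETEVENTS : 0;
        return syscall(__NR_io_uring_enter, ring_fd, to_submit, min_complete,
                       flags, (sigset_t *)0, _NSIG / 8);
    }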
libcfa/src/concurrency/io/setup.cfa
r94647b0b → r7770cc8

 
 	void __cfa_io_start( processor * proc ) {}
-	void __cfa_io_flush( processor * proc ) {}
+	bool __cfa_io_flush( processor * proc, bool ) {}
 	void __cfa_io_stop ( processor * proc ) {}
 
…
 		this.ext_sq.empty = true;
 		(this.ext_sq.queue){};
-		__io_uring_setup( this, cl.io.params, proc->idle );
+		__io_uring_setup( this, cl.io.params, proc->idle_fd );
 		__cfadbg_print_safe(io_core, "Kernel I/O : Created ring for io_context %u (%p)\n", this.fd, &this);
 	}
…
 	cq.cqes = (struct io_uring_cqe *)(((intptr_t)cq.ring_ptr) + params.cq_off.cqes);
 
-	// Step 4 : eventfd
-	// io_uring_register is so f*cking slow on some machine that it
-	// will never succeed if preemption isn't hard blocked
-	__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
-
-	__disable_interrupts_hard();
-
-	int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
-	if (ret < 0) {
-		abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
-	}
-
-	__enable_interrupts_hard();
-
-	__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
+	#if !defined(IO_URING_IDLE)
+		// Step 4 : eventfd
+		// io_uring_register is so f*cking slow on some machine that it
+		// will never succeed if preemption isn't hard blocked
+		__cfadbg_print_safe(io_core, "Kernel I/O : registering %d for completion with ring %d\n", procfd, fd);
+
+		__disable_interrupts_hard();
+
+		int ret = syscall( __NR_io_uring_register, fd, IORING_REGISTER_EVENTFD, &procfd, 1);
+		if (ret < 0) {
+			abort("KERNEL ERROR: IO_URING EVENTFD REGISTER - %s\n", strerror(errno));
+		}
+
+		__enable_interrupts_hard();
+
+		__cfadbg_print_safe(io_core, "Kernel I/O : registered %d for completion with ring %d\n", procfd, fd);
+	#endif
 
 	// some paranoid checks
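Registering the processor's eventfd with the ring is what makes ordinary I/O completions wake a sleeping processor; under IO_URING_IDLE the registration is skipped because the processor instead sleeps inside the ring itself. A minimal standalone sketch of the registration call is shown below (plain C; the function name is an illustration, not runtime API).

    #include <linux/io_uring.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // After this call the kernel signals `efd` whenever a CQE is posted to
    // the ring, so a processor blocked in read(efd, ...) is woken for I/O.
    static long register_completion_eventfd(int ring_fd, int efd) {
        return syscall(__NR_io_uring_register, ring_fd,
                       IORING_REGISTER_EVENTFD, &efd, 1);
    }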
libcfa/src/concurrency/io/types.hfa
r94647b0b → r7770cc8

 
 	// Wait for the future to be fulfilled
-	bool wait( io_future_t & this ) {
-		return wait(this.self);
-	}
+	bool wait     ( io_future_t & this ) { return wait     (this.self); }
+	void reset    ( io_future_t & this ) { return reset    (this.self); }
+	bool available( io_future_t & this ) { return available(this.self); }
 }
libcfa/src/concurrency/kernel.cfa
r94647b0b → r7770cc8

 #include "strstream.hfa"
 #include "device/cpu.hfa"
+#include "io/types.hfa"
 
 //Private includes
…
 static void __wake_one(cluster * cltr);
 
-static void mark_idle (__cluster_proc_list & idles, processor & proc);
+static void idle_sleep(processor * proc, io_future_t & future, char buf[]);
+static bool mark_idle (__cluster_proc_list & idles, processor & proc);
 static void mark_awake(__cluster_proc_list & idles, processor & proc);
-static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list idles );
 
 extern void __cfa_io_start( processor * );
 extern bool __cfa_io_drain( processor * );
-extern void __cfa_io_flush( processor * );
+extern bool __cfa_io_flush( processor *, bool wait );
 extern void __cfa_io_stop ( processor * );
 static inline bool __maybe_io_drain( processor * );
+
+#if defined(IO_URING_IDLE) && defined(CFA_HAVE_LINUX_IO_URING_H)
+	extern bool __kernel_read(processor * proc, io_future_t & future, char buf[], int fd);
+#endif
 
 extern void __disable_interrupts_hard();
…
 	/* paranoid */ verify( __preemption_enabled() );
 }
+
 
 //=============================================================================================
…
 	verify(this);
 
+	io_future_t future; // used for idle sleep when io_uring is present
+	future.self.ptr = 1p; // mark it as already fulfilled so we know if there is a pending request or not
+	char buf[sizeof(uint64_t)];
+
 	__cfa_io_start( this );
 
…
 
 			if( !readyThread ) {
-				__cfa_io_flush( this );
+				__cfa_io_flush( this, false );
+
 				readyThread = __next_thread_slow( this->cltr );
 			}
…
 
 				// Push self to idle stack
-				mark_idle(this->cltr->procs, * this);
+				if(!mark_idle(this->cltr->procs, * this)) continue MAIN_LOOP;
 
 				// Confirm the ready-queue is empty
…
 			}
 
-			#if !defined(__CFA_NO_STATISTICS__)
-				if(this->print_halts) {
-					__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
-				}
-			#endif
-
-			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
+			idle_sleep( this, future, buf );
+
+			// We were woken up, remove self from idle
+			mark_awake(this->cltr->procs, * this);
+
+			// DON'T just proceed, start looking again
+			continue MAIN_LOOP;
+		}
+
+		/* paranoid */ verify( readyThread );
+
+		// Reset io dirty bit
+		this->io.dirty = false;
+
+		// We found a thread run it
+		__run_thread(this, readyThread);
+
+		// Are we done?
+		if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+		if(this->io.pending && !this->io.dirty) {
+			__cfa_io_flush( this, false );
+		}
+
+	#else
+		#warning new kernel loop
+		SEARCH: {
+			/* paranoid */ verify( ! __preemption_enabled() );
+
+			// First, lock the scheduler since we are searching for a thread
+			ready_schedule_lock();
+
+			// Try to get the next thread
+			readyThread = pop_fast( this->cltr );
+			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+			// If we can't find a thread, might as well flush any outstanding I/O
+			if(this->io.pending) { __cfa_io_flush( this, false ); }
+
+			// Spin a little on I/O, just in case
+			for(5) {
+				__maybe_io_drain( this );
+				readyThread = pop_fast( this->cltr );
+				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+			}
+
+			// no luck, try stealing a few times
+			for(5) {
+				if( __maybe_io_drain( this ) ) {
+					readyThread = pop_fast( this->cltr );
+				} else {
+					readyThread = pop_slow( this->cltr );
+				}
+				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+			}
+
+			// still no luck, search for a thread
+			readyThread = pop_search( this->cltr );
+			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
+
+			// Don't block if we are done
+			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) {
+				ready_schedule_unlock();
+				break MAIN_LOOP;
+			}
+
+			__STATS( __tls_stats()->ready.sleep.halts++; )
+
+			// Push self to idle stack
+			ready_schedule_unlock();
+			if(!mark_idle(this->cltr->procs, * this)) goto SEARCH;
+			ready_schedule_lock();
+
+			// Confirm the ready-queue is empty
+			__maybe_io_drain( this );
+			readyThread = pop_search( this->cltr );
+			ready_schedule_unlock();
+
+			if( readyThread ) {
+				// A thread was found, cancel the halt
+				mark_awake(this->cltr->procs, * this);
+
+				__STATS( __tls_stats()->ready.sleep.cancels++; )
+
+				// continue the main loop
+				break SEARCH;
+			}
+
+			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
+			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
 
 			{
 				eventfd_t val;
-				ssize_t ret = read( this->idle, &val, sizeof(val) );
+				ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
 				if(ret < 0) {
 					switch((int)errno) {
…
 				}
 			}
 
-			#if !defined(__CFA_NO_STATISTICS__)
-				if(this->print_halts) {
-					__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
-				}
-			#endif
+			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
 
 			// We were woken up, remove self from idle
…
 		}
 
-		/* paranoid */ verify( readyThread );
-
-		// Reset io dirty bit
-		this->io.dirty = false;
-
-		// We found a thread run it
-		__run_thread(this, readyThread);
-
-		// Are we done?
-		if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-		if(this->io.pending && !this->io.dirty) {
-			__cfa_io_flush( this );
-		}
-
-	#else
-		#warning new kernel loop
-		SEARCH: {
-			/* paranoid */ verify( ! __preemption_enabled() );
-
-			// First, lock the scheduler since we are searching for a thread
-			ready_schedule_lock();
-
-			// Try to get the next thread
-			readyThread = pop_fast( this->cltr );
-			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-			// If we can't find a thread, might as well flush any outstanding I/O
-			if(this->io.pending) { __cfa_io_flush( this ); }
-
-			// Spin a little on I/O, just in case
-			for(5) {
-				__maybe_io_drain( this );
-				readyThread = pop_fast( this->cltr );
-				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-			}
-
-			// no luck, try stealing a few times
-			for(5) {
-				if( __maybe_io_drain( this ) ) {
-					readyThread = pop_fast( this->cltr );
-				} else {
-					readyThread = pop_slow( this->cltr );
-				}
-				if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-			}
-
-			// still no luck, search for a thread
-			readyThread = pop_search( this->cltr );
-			if(readyThread) { ready_schedule_unlock(); break SEARCH; }
-
-			// Don't block if we are done
-			if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
-
-			__STATS( __tls_stats()->ready.sleep.halts++; )
-
-			// Push self to idle stack
-			ready_schedule_unlock();
-			mark_idle(this->cltr->procs, * this);
-			ready_schedule_lock();
-
-			// Confirm the ready-queue is empty
-			__maybe_io_drain( this );
-			readyThread = pop_search( this->cltr );
-			ready_schedule_unlock();
-
-			if( readyThread ) {
-				// A thread was found, cancel the halt
-				mark_awake(this->cltr->procs, * this);
-
-				__STATS( __tls_stats()->ready.sleep.cancels++; )
-
-				// continue the main loop
-				break SEARCH;
-			}
-
-			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); )
-			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
-
-			{
-				eventfd_t val;
-				ssize_t ret = read( this->idle, &val, sizeof(val) );
-				if(ret < 0) {
-					switch((int)errno) {
-					case EAGAIN:
-					#if EAGAIN != EWOULDBLOCK
-						case EWOULDBLOCK:
-					#endif
-					case EINTR:
-						// No need to do anything special here, just assume it's a legitimate wake-up
-						break;
-					default:
-						abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
-					}
-				}
-			}
-
-			__STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); )
-
-			// We were woken up, remove self from idle
-			mark_awake(this->cltr->procs, * this);
-
-			// DON'T just proceed, start looking again
-			continue MAIN_LOOP;
-		}
-
 	RUN_THREAD:
 	/* paranoid */ verify( ! __preemption_enabled() );
…
 
 		if(this->io.pending && !this->io.dirty) {
-			__cfa_io_flush( this );
+			__cfa_io_flush( this, false );
 		}
 
…
 
 	// Check if there is a sleeping processor
-	processor * p;
-	unsigned idle;
-	unsigned total;
-	[idle, total, p] = query_idles(this->procs);
+	int fd = __atomic_load_n(&this->procs.fd, __ATOMIC_SEQ_CST);
 
 	// If no one is sleeping, we are done
-	if( idle == 0 ) return;
+	if( fd == 0 ) return;
 
 	// We found a processor, wake it up
 	eventfd_t val;
 	val = 1;
-	eventfd_write( p->idle, val );
+	eventfd_write( fd, val );
 
 	#if !defined(__CFA_NO_STATISTICS__)
…
 		eventfd_t val;
 		val = 1;
-		eventfd_write( this->idle, val );
+		eventfd_write( this->idle_fd, val );
 		__enable_interrupts_checked();
 }
 
-static void mark_idle(__cluster_proc_list & this, processor & proc) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	lock( this );
+static void idle_sleep(processor * this, io_future_t & future, char buf[]) {
+	#if !defined(IO_URING_IDLE) || !defined(CFA_HAVE_LINUX_IO_URING_H)
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(this->print_halts) {
+				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl());
+			}
+		#endif
+
+		__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle_fd);
+
+		{
+			eventfd_t val;
+			ssize_t ret = read( this->idle_fd, &val, sizeof(val) );
+			if(ret < 0) {
+				switch((int)errno) {
+				case EAGAIN:
+				#if EAGAIN != EWOULDBLOCK
+					case EWOULDBLOCK:
+				#endif
+				case EINTR:
+					// No need to do anything special here, just assume it's a legitimate wake-up
+					break;
+				default:
+					abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) );
+				}
+			}
+		}
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			if(this->print_halts) {
+				__cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl());
+			}
+		#endif
+	#else
+		#if !defined(CFA_HAVE_IORING_OP_READ)
+			#error this is only implemented if the read is present
+		#endif
+		// Do we already have a pending read
+		if(available(future)) {
+			// There is no pending read, we need to add one
+			reset(future);
+
+			__kernel_read(this, future, buf, this->idle_fd );
+		}
+
+		__cfa_io_flush( this, true );
+	#endif
+}
+
+static bool mark_idle(__cluster_proc_list & this, processor & proc) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	if(!try_lock( this )) return false;
 	this.idle++;
 	/* paranoid */ verify( this.idle <= this.total );
 	remove(proc);
 	insert_first(this.idles, proc);
+
+	__atomic_store_n(&this.fd, proc.idle_fd, __ATOMIC_SEQ_CST);
 	unlock( this );
 	/* paranoid */ verify( ! __preemption_enabled() );
+
+	return true;
 }
…
 	remove(proc);
 	insert_last(this.actives, proc);
+
+	{
+		int fd = 0;
+		if(!this.idles`isEmpty) fd = this.idles`first.idle_fd;
+		__atomic_store_n(&this.fd, fd, __ATOMIC_SEQ_CST);
+	}
+
 	unlock( this );
-	/* paranoid */ verify( ! __preemption_enabled() );
-}
-
-static [unsigned idle, unsigned total, * processor] query_idles( & __cluster_proc_list this ) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( ready_schedule_islocked() );
-
-	for() {
-		uint64_t l = __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST);
-		if( 1 == (l % 2) ) { Pause(); continue; }
-		unsigned idle    = this.idle;
-		unsigned total   = this.total;
-		processor * proc = &this.idles`first;
-		// Compiler fence is unnecessary, but gcc-8 and older incorrectly reorder code without it
-		asm volatile("": : :"memory");
-		if(l != __atomic_load_n(&this.lock, __ATOMIC_SEQ_CST)) { Pause(); continue; }
-		return [idle, total, proc];
-	}
-
-	/* paranoid */ verify( ready_schedule_islocked() );
 	/* paranoid */ verify( ! __preemption_enabled() );
 }
…
 	if(head == tail) return false;
 	#if OLD_MAIN
-	ready_schedule_lock();
-	ret = __cfa_io_drain( proc );
-	ready_schedule_unlock();
+		ready_schedule_lock();
+		ret = __cfa_io_drain( proc );
+		ready_schedule_unlock();
 	#else
 		ret = __cfa_io_drain( proc );
-		#endif
+	#endif
 	#endif
 	return ret;
…
 		/* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
 		/* paranoid */ verify( it->local_data->this_stats );
+		// __print_stats( it->local_data->this_stats, cltr->print_stats, "Processor", it->name, (void*)it );
 		__tally_stats( cltr->stats, it->local_data->this_stats );
 		it = &(*it)`next;
…
 	// this doesn't solve all problems but does solve many
 	// so it's probably good enough
+	disable_interrupts();
 	uint_fast32_t last_size = ready_mutate_lock();
 
…
 	// Unlock the RWlock
 	ready_mutate_unlock( last_size );
+	enable_interrupts();
 }
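Both the old inline block and the new idle_sleep helper rest on the same eventfd protocol: a processor with nothing to run blocks in read(2) on its idle_fd, and the wake path unblocks exactly one sleeper with eventfd_write(2). A minimal standalone sketch of that protocol follows (plain C; the function names are illustrative and error handling is elided).

    #include <sys/eventfd.h>
    #include <unistd.h>

    static void idle_block(int idle_fd) {
        eventfd_t val;
        // Blocks until another processor writes to idle_fd; EINTR and other
        // spurious returns are simply treated as legitimate wake-ups.
        (void)read(idle_fd, &val, sizeof(val));
    }

    static void wake_sleeper(int idle_fd) {
        // Any counter increment unblocks the read; the sleeper does not
        // care about the value it receives, only that it woke up.
        eventfd_write(idle_fd, 1);
    }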
libcfa/src/concurrency/kernel.hfa
r94647b0b → r7770cc8

 
 	// Idle lock (kernel semaphore)
-	int idle;
+	int idle_fd;
 
 	// Termination synchronisation (user semaphore)
…
 struct __cluster_proc_list {
 	// Spin lock protecting the queue
-	volatile uint64_t lock;
+	__spinlock_t lock;
+
+	// FD to use to wake a processor
+	volatile int fd;
 
 	// Total number of processors
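The new volatile int fd field caches the eventfd of one idle processor so the wake path can find a sleeper with a single atomic load instead of querying the idle list. A small standalone sketch of that publish/consume pattern (plain C11 atomics; names are illustrative, not the runtime's):

    #include <stdatomic.h>
    #include <sys/eventfd.h>

    static _Atomic int cluster_wake_fd = 0;   // 0 means no processor is idle

    static void publish_idle_fd(int idle_fd) {
        // Done while holding the idle-list lock, as mark_idle does above.
        atomic_store_explicit(&cluster_wake_fd, idle_fd, memory_order_seq_cst);
    }

    static void wake_one_sleeper(void) {
        int fd = atomic_load_explicit(&cluster_wake_fd, memory_order_seq_cst);
        if (fd == 0) return;              // nobody is sleeping, nothing to do
        eventfd_write(fd, 1);             // unblock the sleeper's read(2)
    }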
libcfa/src/concurrency/kernel/startup.cfa
r94647b0b → r7770cc8

 // Other Forward Declarations
 extern void __wake_proc(processor *);
+extern int cfa_main_returned; // from interpose.cfa
 
 //-----------------------------------------------------------------------------
…
 
 static void __kernel_shutdown(void) {
+	if(!cfa_main_returned) return;
 	/* paranoid */ verify( __preemption_enabled() );
 	disable_interrupts();
…
 	this.local_data = 0p;
 
-	this.idle = eventfd(0, 0);
-	if (idle < 0) {
+	this.idle_fd = eventfd(0, 0);
+	if (idle_fd < 0) {
 		abort("KERNEL ERROR: PROCESSOR EVENTFD - %s\n", strerror(errno));
 	}
…
 // Not a ctor, it just preps the destruction but should not destroy members
 static void deinit(processor & this) {
-	close(this.idle);
+	close(this.idle_fd);
 }
…
 // Cluster
 static void ?{}(__cluster_proc_list & this) {
-	this.lock  = 0;
+	this.fd    = 0;
 	this.idle  = 0;
 	this.total = 0;
libcfa/src/concurrency/kernel_private.hfa
r94647b0b → r7770cc8

 }
 
+// #define IO_URING_IDLE
+
 //-----------------------------------------------------------------------------
 // Scheduler
…
 	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
 }
-
-
-
-
 
 //-----------------------------------------------------------------------
…
 	ready_schedule_lock();
 
-	// Simple counting lock, acquired, acquired by incrementing the counter
-	// to an odd number
-	for() {
-		uint64_t l = this.lock;
-		if(
-			(0 == (l % 2))
-			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
-		) return;
-		Pause();
-	}
-
-	/* paranoid */ verify( ! __preemption_enabled() );
+	lock( this.lock __cfaabi_dbg_ctx2 );
+
+	/* paranoid */ verify( ! __preemption_enabled() );
+}
+
+static inline bool try_lock(__cluster_proc_list & this) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+
+	// Start by locking the global RWlock so that we know no-one is
+	// adding/removing processors while we mess with the idle lock
+	ready_schedule_lock();
+
+	if(try_lock( this.lock __cfaabi_dbg_ctx2 )) {
+		// success
+		/* paranoid */ verify( ! __preemption_enabled() );
+		return true;
+	}
+
+	// failed to lock
+	ready_schedule_unlock();
+
+	/* paranoid */ verify( ! __preemption_enabled() );
+	return false;
 }
 
…
 	/* paranoid */ verify( ! __preemption_enabled() );
 
-	/* paranoid */ verify( 1 == (this.lock % 2) );
-	// Simple couting lock, release by incrementing to an even number
-	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );
+	unlock(this.lock);
 
 	// Release the global lock, which we acquired when locking
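Replacing the hand-rolled counting lock with __spinlock_t and adding a try_lock path means a processor that loses the race for the idle list simply resumes searching for work instead of spinning here. A standalone sketch of a try_lock/unlock pair on a test-and-set spinlock (plain C11 atomics; the type and function names are illustrative, not the Cforall primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct { atomic_flag held; } spinlock_t;

    static spinlock_t idle_lock = { ATOMIC_FLAG_INIT };

    static bool spin_try_lock(spinlock_t * l) {
        // test_and_set returns the previous value: false means we acquired it.
        return !atomic_flag_test_and_set_explicit(&l->held, memory_order_acquire);
    }

    static void spin_unlock(spinlock_t * l) {
        atomic_flag_clear_explicit(&l->held, memory_order_release);
    }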
libcfa/src/concurrency/monitor.hfa
r94647b0b → r7770cc8

 	free( th );
 }
+
+static inline forall( T & | sized(T) | { void ^?{}( T & mutex ); } )
+void adelete( T arr[] ) {
+	if ( arr ) {                                        // ignore null
+		size_t dim = malloc_size( arr ) / sizeof( T );
+		for ( int i = dim - 1; i >= 0; i -= 1 ) {       // reverse allocation order, must be unsigned
+			^(arr[i]){};                                // run destructor
+		} // for
+		free( arr );
+	} // if
+} // adelete
 
 //-----------------------------------------------------------------------------
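adelete complements the existing delete helper: it recovers the element count from the allocation size, runs each destructor in reverse allocation order, and then frees the block. A rough standalone sketch of the same pattern (plain C; the element type, destructor, and the explicit dim parameter are illustrative, since plain C cannot recover the count the way malloc_size allows above):

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct { int fd; } resource;

    static void resource_destroy(resource * r) { /* release r->fd, etc. */ }

    static void adelete_resources(resource * arr, size_t dim) {
        if (!arr) return;                    // ignore null, like delete
        for (size_t i = dim; i-- > 0; ) {    // reverse allocation order
            resource_destroy(&arr[i]);
        }
        free(arr);
    }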
libcfa/src/concurrency/mutex_stmt.hfa
r94647b0b → r7770cc8

 #include "bits/algorithm.hfa"
-#include <assert.h>
-#include "invoke.h"
-#include "stdlib.hfa"
-#include <stdio.h>
+#include "bits/defs.hfa"
 
 //-----------------------------------------------------------------------------
Note: See TracChangeset for help on using the changeset viewer.