Changeset 8e4aa05 for libcfa/src/concurrency/kernel.cfa
- Timestamp: Mar 4, 2021, 7:40:25 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 77d601f
- Parents: 342af53 (diff), a5040fe (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- File: 1 edited
libcfa/src/concurrency/kernel.cfa
```diff
--- libcfa/src/concurrency/kernel.cfa	(r342af53)
+++ libcfa/src/concurrency/kernel.cfa	(r8e4aa05)
@@ -22,4 +22,7 @@
 #include <signal.h>
 #include <unistd.h>
+extern "C" {
+	#include <sys/eventfd.h>
+}
 
 //CFA Includes
@@ -114,4 +117,12 @@
 static [unsigned idle, unsigned total, * processor] query( & __cluster_idles idles );
 
+extern void __cfa_io_start( processor * );
+extern void __cfa_io_drain( processor * );
+extern void __cfa_io_flush( processor * );
+extern void __cfa_io_stop ( processor * );
+static inline void __maybe_io_drain( processor * );
+
+extern void __disable_interrupts_hard();
+extern void __enable_interrupts_hard();
 
 //=============================================================================================
@@ -129,4 +140,6 @@
 	verify(this);
 
+	__cfa_io_start( this );
+
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p starting\n", this);
 	#if !defined(__CFA_NO_STATISTICS__)
@@ -140,4 +153,9 @@
 	preemption_scope scope = { this };
 
+	#if !defined(__CFA_NO_STATISTICS__)
+		unsigned long long last_tally = rdtscl();
+	#endif
+
+
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);
 
@@ -145,8 +163,12 @@
 	MAIN_LOOP:
 	for() {
+		// Check if there is pending io
+		__maybe_io_drain( this );
+
 		// Try to get the next thread
 		readyThread = __next_thread( this->cltr );
 
 		if( !readyThread ) {
+			__cfa_io_flush( this );
 			readyThread = __next_thread_slow( this->cltr );
 		}
@@ -184,5 +206,10 @@
 			#endif
 
-			wait( this->idle );
+			__cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle);
+
+			__disable_interrupts_hard();
+			eventfd_t val;
+			eventfd_read( this->idle, &val );
+			__enable_interrupts_hard();
 
 			#if !defined(__CFA_NO_STATISTICS__)
@@ -201,4 +228,7 @@
 		/* paranoid */ verify( readyThread );
 
+		// Reset io dirty bit
+		this->io.dirty = false;
+
 		// We found a thread run it
 		__run_thread(this, readyThread);
@@ -206,4 +236,16 @@
 		// Are we done?
 		if( __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST) ) break MAIN_LOOP;
+
+		#if !defined(__CFA_NO_STATISTICS__)
+			unsigned long long curr = rdtscl();
+			if(curr > (last_tally + 500000000)) {
+				__tally_stats(this->cltr->stats, __cfaabi_tls.this_stats);
+				last_tally = curr;
+			}
+		#endif
+
+		if(this->io.pending && !this->io.dirty) {
+			__cfa_io_flush( this );
+		}
 	}
 
@@ -211,5 +253,8 @@
 	}
 
-	V( this->terminated );
+	__cfa_io_stop( this );
+
+	post( this->terminated );
+
 
 	if(this == mainProcessor) {
@@ -234,4 +279,6 @@
 	/* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next );
 	__builtin_prefetch( thrd_dst->context.SP );
+
+	__cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);
 
 	$coroutine * proc_cor = get_coroutine(this->runner);
@@ -316,4 +363,6 @@
 	// Just before returning to the processor, set the processor coroutine to active
 	proc_cor->state = Active;
+
+	__cfadbg_print_safe(runtime_core, "Kernel : core %p finished running thread %p\n", this, thrd_dst);
 
 	/* paranoid */ verify( ! __preemption_enabled() );
@@ -550,5 +599,7 @@
 
 	// We found a processor, wake it up
-	post( p->idle );
+	eventfd_t val;
+	val = 1;
+	eventfd_write( p->idle, val );
 
 	#if !defined(__CFA_NO_STATISTICS__)
@@ -568,5 +619,7 @@
 	disable_interrupts();
 	/* paranoid */ verify( ! __preemption_enabled() );
-	post( this->idle );
+	eventfd_t val;
+	val = 1;
+	eventfd_write( this->idle, val );
 	enable_interrupts( __cfaabi_dbg_ctx );
 }
@@ -611,36 +664,6 @@
 // Unexpected Terminating logic
 //=============================================================================================
-static __spinlock_t kernel_abort_lock;
-static bool kernel_abort_called = false;
-
-void * kernel_abort(void) __attribute__ ((__nothrow__)) {
-	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
-	// the globalAbort flag is true.
-	lock( kernel_abort_lock __cfaabi_dbg_ctx2 );
-
-	// disable interrupts, it no longer makes sense to try to interrupt this processor
-	disable_interrupts();
-
-	// first task to abort ?
-	if ( kernel_abort_called ) {		// not first task to abort ?
-		unlock( kernel_abort_lock );
-
-		sigset_t mask;
-		sigemptyset( &mask );
-		sigaddset( &mask, SIGALRM );	// block SIGALRM signals
-		sigaddset( &mask, SIGUSR1 );	// block SIGALRM signals
-		sigsuspend( &mask );		// block the processor to prevent further damage during abort
-		_exit( EXIT_FAILURE );		// if processor unblocks before it is killed, terminate it
-	}
-	else {
-		kernel_abort_called = true;
-		unlock( kernel_abort_lock );
-	}
-
-	return __cfaabi_tls.this_thread;
-}
-
-void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) {
-	$thread * thrd = ( $thread * ) kernel_data;
+void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
+	$thread * thrd = __cfaabi_tls.this_thread;
 
 	if(thrd) {
@@ -662,6 +685,6 @@
 }
 
-int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
-	return get_coroutine( kernelTLS().this_thread) == get_coroutine(mainThread) ? 4 : 2;
+int __kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) {
+	return get_coroutine(__cfaabi_tls.this_thread) == get_coroutine(mainThread) ? 4 : 2;
 }
 
@@ -681,60 +704,18 @@
 // Kernel Utilities
 //=============================================================================================
-//-----------------------------------------------------------------------------
-// Locks
-void ?{}( semaphore & this, int count = 1 ) {
-	(this.lock){};
-	this.count = count;
-	(this.waiting){};
-}
-void ^?{}(semaphore & this) {}
-
-bool P(semaphore & this) with( this ){
-	lock( lock __cfaabi_dbg_ctx2 );
-	count -= 1;
-	if ( count < 0 ) {
-		// queue current task
-		append( waiting, active_thread() );
-
-		// atomically release spin lock and block
-		unlock( lock );
-		park();
-		return true;
-	}
-	else {
-		unlock( lock );
-		return false;
-	}
-}
-
-bool V(semaphore & this) with( this ) {
-	$thread * thrd = 0p;
-	lock( lock __cfaabi_dbg_ctx2 );
-	count += 1;
-	if ( count <= 0 ) {
-		// remove task at head of waiting list
-		thrd = pop_head( waiting );
-	}
-
-	unlock( lock );
-
-	// make new owner
-	unpark( thrd );
-
-	return thrd != 0p;
-}
-
-bool V(semaphore & this, unsigned diff) with( this ) {
-	$thread * thrd = 0p;
-	lock( lock __cfaabi_dbg_ctx2 );
-	int release = max(-count, (int)diff);
-	count += diff;
-	for(release) {
-		unpark( pop_head( waiting ) );
-	}
-
-	unlock( lock );
-
-	return thrd != 0p;
+#if defined(CFA_HAVE_LINUX_IO_URING_H)
+	#include "io/types.hfa"
+#endif
+
+static inline void __maybe_io_drain( processor * proc ) {
+	#if defined(CFA_HAVE_LINUX_IO_URING_H)
+		__cfadbg_print_safe(runtime_core, "Kernel : core %p checking io for ring %d\n", proc, proc->io.ctx->fd);
+
+		// Check if we should drain the queue
+		$io_context * ctx = proc->io.ctx;
+		unsigned head = *ctx->cq.head;
+		unsigned tail = *ctx->cq.tail;
+		if(head != tail) __cfa_io_drain( proc );
+	#endif
 }
 
```
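The most visible change in this merge is the switch from the runtime's semaphore (`wait( this->idle )` / `post( p->idle )`) to a Linux eventfd for putting processors to sleep and waking them: an idle processor blocks in `eventfd_read` on its `idle` descriptor, and a waker publishes a count with `eventfd_write`. Below is a minimal stand-alone sketch of that sleep/wake pattern in plain C; the `idle_fd` variable, `idle_loop` thread, and timing are illustrative only and not part of the CFA runtime.

```c
#include <stdio.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/eventfd.h>

// Hypothetical stand-in for a processor's idle file descriptor.
static int idle_fd;

static void * idle_loop( void * arg ) {
	eventfd_t val;
	// Blocks until another thread writes a non-zero value to the fd.
	eventfd_read( idle_fd, &val );
	printf( "woken with value %llu\n", (unsigned long long)val );
	return NULL;
}

int main( void ) {
	// Counter starts at 0, default (non-semaphore) semantics.
	idle_fd = eventfd( 0, 0 );
	if ( idle_fd < 0 ) return 1;

	pthread_t sleeper;
	pthread_create( &sleeper, NULL, idle_loop, NULL );

	sleep( 1 );                     // let the sleeper block
	eventfd_write( idle_fd, 1 );    // wake it, like eventfd_write( p->idle, val ) in the diff

	pthread_join( sleeper, NULL );
	close( idle_fd );
	return 0;
}
```

In the default mode, `eventfd_read` blocks until the counter is non-zero and then atomically returns and resets it, so a single write is enough to release one sleeping processor.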
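The new `__maybe_io_drain` only calls `__cfa_io_drain` when the shared completion-queue head and tail differ, i.e. when there is at least one completion to consume. The sketch below shows the same cheap emptiness check using liburing's helpers as a stand-in for the raw `*ctx->cq.head` / `*ctx->cq.tail` loads; `maybe_drain` and the completion handling are hypothetical, not the changeset's code.

```c
#include <liburing.h>

// Hypothetical helper: skip the drain entirely when the completion
// queue is empty, mirroring the head != tail test in __maybe_io_drain.
static void maybe_drain( struct io_uring * ring ) {
	if ( io_uring_cq_ready( ring ) == 0 ) return;   // head == tail: nothing to do

	struct io_uring_cqe * cqe;
	unsigned head, count = 0;
	io_uring_for_each_cqe( ring, head, cqe ) {
		// ... process cqe->user_data / cqe->res here ...
		count++;
	}
	io_uring_cq_advance( ring, count );             // publish the new head
}
```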
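The main loop now also pushes per-processor statistics to the cluster at most once every 500,000,000 cycles, using `rdtscl()` timestamps instead of flushing on every iteration. A small sketch of that throttling pattern, using the x86 `__rdtsc()` intrinsic in place of CFA's `rdtscl()`; `maybe_tally` and `flush_stats` are illustrative names:

```c
#include <x86intrin.h>                  // __rdtsc(), x86 only

#define TALLY_INTERVAL 500000000ull     // cycles between flushes, as in the diff

static unsigned long long last_tally;   // timestamp of the last flush

// Call once per loop iteration; flushes at most once per TALLY_INTERVAL cycles.
static void maybe_tally( void (*flush_stats)(void) ) {
	unsigned long long curr = __rdtsc();
	if ( curr > last_tally + TALLY_INTERVAL ) {
		flush_stats();
		last_tally = curr;
	}
}
```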
Note: See TracChangeset for help on using the changeset viewer.