Changeset 949339b for libcfa/src/concurrency/kernel.cfa
- Timestamp:
- Sep 27, 2021, 2:09:55 PM (4 years ago)
- Branches:
- ADT, ast-experimental, enum, forall-pointer-decay, master, pthread-emulation, qualifiedEnum
- Children:
- cc287800
- Parents:
- 4e28d2e9 (diff), 056cbdb (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel.cfa
r4e28d2e9 r949339b 22 22 #include <errno.h> 23 23 #include <stdio.h> 24 #include <string.h> 24 25 #include <signal.h> 25 26 #include <unistd.h> … … 31 32 #include "kernel_private.hfa" 32 33 #include "preemption.hfa" 34 #include "strstream.hfa" 35 #include "device/cpu.hfa" 33 36 34 37 //Private includes … … 231 234 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle); 232 235 233 __disable_interrupts_hard(); 234 eventfd_t val; 235 eventfd_read( this->idle, &val ); 236 __enable_interrupts_hard(); 236 { 237 eventfd_t val; 238 ssize_t ret = read( this->idle, &val, sizeof(val) ); 239 if(ret < 0) { 240 switch((int)errno) { 241 case EAGAIN: 242 #if EAGAIN != EWOULDBLOCK 243 case EWOULDBLOCK: 244 #endif 245 case EINTR: 246 // No need to do anything special here, just assume it's a legitimate wake-up 247 break; 248 default: 249 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) ); 250 } 251 } 252 } 237 253 238 254 #if !defined(__CFA_NO_STATISTICS__) … … 325 341 } 326 342 327 343 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( STDOUT_FILENO, "PH:%d - %lld 0\n", this->unique_id, rdtscl()); ) 328 344 __cfadbg_print_safe(runtime_core, "Kernel : core %p waiting on eventfd %d\n", this, this->idle); 329 345 330 // __disable_interrupts_hard(); 331 eventfd_t val; 332 eventfd_read( this->idle, &val ); 333 // __enable_interrupts_hard(); 346 { 347 eventfd_t val; 348 ssize_t ret = read( this->idle, &val, sizeof(val) ); 349 if(ret < 0) { 350 switch((int)errno) { 351 case EAGAIN: 352 #if EAGAIN != EWOULDBLOCK 353 case EWOULDBLOCK: 354 #endif 355 case EINTR: 356 // No need to do anything special here, just assume it's a legitimate wake-up 357 break; 358 default: 359 abort( "KERNEL : internal error, read failure on idle eventfd, error(%d) %s.", (int)errno, strerror( (int)errno ) ); 360 } 361 } 362 } 334 363 335 364 __STATS( if(this->print_halts) __cfaabi_bits_print_safe( 
STDOUT_FILENO, "PH:%d - %lld 1\n", this->unique_id, rdtscl()); ) … … 393 422 /* paranoid */ verifyf( thrd_dst->link.next == 0p, "Expected null got %p", thrd_dst->link.next ); 394 423 __builtin_prefetch( thrd_dst->context.SP ); 395 396 int curr = __kernel_getcpu();397 if(thrd_dst->last_cpu != curr) {398 int64_t l = thrd_dst->last_cpu;399 int64_t c = curr;400 int64_t v = (l << 32) | c;401 __push_stat( __tls_stats(), v, false, "Processor", this );402 }403 404 thrd_dst->last_cpu = curr;405 424 406 425 __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name); … … 457 476 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 458 477 // The thread was preempted, reschedule it and reset the flag 459 schedule_thread$( thrd_dst );478 schedule_thread$( thrd_dst, UNPARK_LOCAL ); 460 479 break RUNNING; 461 480 } … … 541 560 // Scheduler routines 542 561 // KERNEL ONLY 543 static void __schedule_thread( thread$ * thrd ) {562 static void __schedule_thread( thread$ * thrd, unpark_hint hint ) { 544 563 /* paranoid */ verify( ! __preemption_enabled() ); 545 564 /* paranoid */ verify( ready_schedule_islocked()); … … 561 580 // Dereference the thread now because once we push it, there is not guaranteed it's still valid. 
562 581 struct cluster * cl = thrd->curr_cluster; 563 __STATS(bool outside = thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; )582 __STATS(bool outside = hint == UNPARK_LOCAL && thrd->last_proc && thrd->last_proc != kernelTLS().this_processor; ) 564 583 565 584 // push the thread to the cluster ready-queue 566 push( cl, thrd, local);585 push( cl, thrd, hint ); 567 586 568 587 // variable thrd is no longer safe to use … … 589 608 } 590 609 591 void schedule_thread$( thread$ * thrd ) {610 void schedule_thread$( thread$ * thrd, unpark_hint hint ) { 592 611 ready_schedule_lock(); 593 __schedule_thread( thrd );612 __schedule_thread( thrd, hint ); 594 613 ready_schedule_unlock(); 595 614 } … … 642 661 } 643 662 644 void __kernel_unpark( thread$ * thrd ) {663 void __kernel_unpark( thread$ * thrd, unpark_hint hint ) { 645 664 /* paranoid */ verify( ! __preemption_enabled() ); 646 665 /* paranoid */ verify( ready_schedule_islocked()); … … 650 669 if(__must_unpark(thrd)) { 651 670 // Wake lost the race, 652 __schedule_thread( thrd );671 __schedule_thread( thrd, hint ); 653 672 } 654 673 … … 657 676 } 658 677 659 void unpark( thread$ * thrd ) {678 void unpark( thread$ * thrd, unpark_hint hint ) { 660 679 if( !thrd ) return; 661 680 … … 663 682 disable_interrupts(); 664 683 // Wake lost the race, 665 schedule_thread$( thrd );684 schedule_thread$( thrd, hint ); 666 685 enable_interrupts(false); 667 686 }
Note:
See TracChangeset
for help on using the changeset viewer.