Changeset 3381ed7

Timestamp: Feb 13, 2020, 4:18:07 PM (5 years ago)
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: 50b8885
Parents: 9f575ea
Files: 4 added, 14 edited

Legend:
  Unmodified (no prefix)
  Added (+)
  Removed (-)
libcfa/src/bits/containers.hfa
r9f575ea → r3381ed7

  static inline forall( dtype T | is_node(T) ) {
      void ?{}( __queue(T) & this ) with( this ) {
-         head{ 0p };
+         head{ 1p };
          tail{ &head };
+         verify(*tail == 1p);
      }

      void append( __queue(T) & this, T * val ) with( this ) {
          verify(tail != 0p);
+         verify(*tail == 1p);
          *tail = val;
          tail = &get_next( *val );
+         *tail = 1p;
      }

      T * pop_head( __queue(T) & this ) {
+         verify(*this.tail == 1p);
          T * head = this.head;
-         if( head ) {
+         if( head != 1p ) {
              this.head = get_next( *head );
-             if( !get_next( *head ) ) {
+             if( get_next( *head ) == 1p ) {
                  this.tail = &this.head;
              }
              get_next( *head ) = 0p;
-         }
-         return head;
+             verify(*this.tail == 1p);
+             return head;
+         }
+         verify(*this.tail == 1p);
+         return 0p;
      }
…
          get_next( *val ) = 0p;

-         verify( (head == 0p) == (&head == tail) );
-         verify( *tail == 0p );
+         verify( (head == 1p) == (&head == tail) );
+         verify( *tail == 1p );
          return val;
      }
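The queue change above swaps the null terminator (0p) for a 1p sentinel: a node's next pointer is now 0p only when the node is in no queue at all, and 1p when it is the last node of a queue, so the new verify calls can tell "not enqueued" apart from "enqueued last". A minimal sketch of the same idea in plain C; the names and the address-1 encoding are mine, mirroring the header rather than copied from it:

#include <assert.h>
#include <stdio.h>

typedef struct node {
    struct node * next;            // 0 = in no queue, SENTINEL = last in queue
    int           value;
} node;

#define SENTINEL ((node *)1)       // never dereferenced, only compared

typedef struct queue {
    node *  head;                  // SENTINEL when the queue is empty
    node ** tail;                  // points at the last next-field (or at head)
} queue;

static void queue_init(queue * q) { q->head = SENTINEL; q->tail = &q->head; }

static void append(queue * q, node * n) {
    assert(n->next == 0);          // catches double-insertion, the point of the change
    *q->tail = n;
    q->tail  = &n->next;
    *q->tail = SENTINEL;
}

static node * pop_head(queue * q) {
    node * h = q->head;
    if (h == SENTINEL) return 0;   // empty
    q->head = h->next;
    if (h->next == SENTINEL) q->tail = &q->head;
    h->next = 0;                   // back to "in no queue"
    return h;
}

int main(void) {
    queue q; queue_init(&q);
    node a = {0, 1}, b = {0, 2};
    append(&q, &a); append(&q, &b);
    for (node * n; (n = pop_head(&q)) != 0; ) printf("%d\n", n->value);
}

The payoff appears later in this changeset: force_yield in kernel.cfa tests thrd->next == 0p to detect a thread that is already enqueuing itself and abandons the preemption in that case.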
libcfa/src/bits/locks.hfa
r9f575ea → r3381ed7

  }

- extern void yield( unsigned int );
-
  static inline void ?{}( __spinlock_t & this ) {
      this.lock = 0;
…
  // Lock the spinlock, return false if already acquired
  static inline bool try_lock ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
+     disable_interrupts();
      bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
      if( result ) {
-         disable_interrupts();
          __cfaabi_dbg_record( this, caller );
+     } else {
+         enable_interrupts_noPoll();
      }
      return result;
…
      #endif

+     disable_interrupts();
      for ( unsigned int i = 1;; i += 1 ) {
          if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
…
          #endif
      }
-     disable_interrupts();
      __cfaabi_dbg_record( this, caller );
  }

  static inline void unlock( __spinlock_t & this ) {
+     __atomic_clear( &this.lock, __ATOMIC_RELEASE );
      enable_interrupts_noPoll();
-     __atomic_clear( &this.lock, __ATOMIC_RELEASE );
  }
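The substantive change here is ordering, not locking: interrupts are now masked before the test-and-set (and restored on a failed try_lock), and unlock releases the lock before unmasking, so a preemption signal can never be delivered while the spinlock is held. A hedged C11 sketch of that ordering; disable_interrupts and enable_interrupts_noPoll are stand-in stubs for the CFA runtime calls, and the real code's relaxed pre-read before the test-and-set is kept only in the locking loop:

#include <stdatomic.h>
#include <stdbool.h>

// Stubs standing in for the runtime's per-thread interrupt masking.
static _Thread_local int interrupt_depth = 0;
static void disable_interrupts(void)       { interrupt_depth++; }
static void enable_interrupts_noPoll(void) { interrupt_depth--; }

typedef struct { atomic_flag lock; } spinlock;

static bool try_lock(spinlock * this) {
    disable_interrupts();                       // mask BEFORE touching the lock
    bool result = !atomic_flag_test_and_set_explicit(&this->lock, memory_order_acquire);
    if (!result)
        enable_interrupts_noPoll();             // failed: restore the mask, hold nothing
    return result;                              // succeeded: stay masked while held
}

static void lock(spinlock * this) {
    disable_interrupts();
    while (atomic_flag_test_and_set_explicit(&this->lock, memory_order_acquire))
        /* spin */;
}

static void unlock(spinlock * this) {
    atomic_flag_clear_explicit(&this->lock, memory_order_release); // release first...
    enable_interrupts_noPoll();                 // ...then allow preemption again
}

int main(void) {
    spinlock l = { ATOMIC_FLAG_INIT };
    lock(&l);
    unlock(&l);
    if (try_lock(&l)) unlock(&l);
}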
libcfa/src/concurrency/invoke.h
r9f575ea → r3381ed7

  enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun, Reschedule };
+ enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION };
+ enum __Owner_Reason { __NO_OWNER, __ENTER_FREE, __ENTER_ACCEPT, __ENTER_DTOR_FREE, __ENTER_DTOR_ACCEPT, __ENTER_SIGNAL_BLOCK, __WAITFOR, __LEAVE, __LEAVE_THREAD, __WAIT };

  struct coroutine_desc {
…
      struct thread_desc * owner;

+     enum __Owner_Reason owner_reason;
+
      // queue of threads that are blocked waiting for the monitor
      __queue_t(struct thread_desc) entry_queue;
…
      // current execution status for coroutine
      volatile int state;
-     int preempted;
+     enum __Preemption_Reason preempted;

      //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
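Turning the bare int preempted flag into enum __Preemption_Reason, and pairing owner with owner_reason, costs nothing at runtime but makes every assertion self-describing: a failed invariant can now report why a thread was preempted or how a monitor was acquired. A toy C illustration of the pattern; the enum mirrors the one above, while the printing helper is hypothetical:

#include <stdio.h>

// Mirrors the new runtime enum: a *reason* replaces a true/false flag.
enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION };

// Hypothetical helper: diagnostics can now name the cause.
static const char * reason_str( enum __Preemption_Reason r ) {
    switch( r ) {
        case __NO_PREEMPTION:    return "not preempted";
        case __ALARM_PREEMPTION: return "timer alarm";
        case __POLL_PREEMPTION:  return "poll on interrupt re-enable";
    }
    return "?";
}

int main(void) {
    enum __Preemption_Reason preempted = __ALARM_PREEMPTION;
    // With a bool, this message could only say "preempted: yes".
    printf( "preempted: %s\n", reason_str( preempted ) );
}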
libcfa/src/concurrency/kernel.cfa
r9f575ea → r3381ed7

  // Kernel Scheduling logic
  //=============================================================================================
+ static thread_desc * nextThread(cluster * this);
  static void runThread(processor * this, thread_desc * dst);
- static void finishRunning(processor * this);
  static void halt(processor * this);
…
  if(readyThread) {
-     verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted);
+     /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

      runThread(this, readyThread);

-     verify( ! kernelTLS.preemption_state.enabled );
-
-     //Some actions need to be taken from the kernel
-     finishRunning(this);
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

      spin_count = 0;
  } else {
      // spin(this, &spin_count);
-     halt(this);
+     // halt(this);
  }
…
  // Actually run the thread
- RUN:
- {
+ RUNNING: while(true) {
      if(unlikely(thrd_dst->preempted)) {
-         thrd_dst->preempted = false;
+         thrd_dst->preempted = __NO_PREEMPTION;
+         verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
      } else {
+         verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
          thrd_dst->state = Active;
      }
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

      // set context switch to the thread that the processor is executing
…
      CtxSwitch( &proc_cor->context, &thrd_dst->context );
      // when CtxSwitch returns we are back in the processor coroutine
- }
-
- // We just finished running a thread, there are a few things that could have happened.
- // 1 - Regular case : the thread has blocked and now one has scheduled it yet.
- // 2 - Racy case : the thread has blocked but someone has already tried to schedule it.
- // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way
- // 4 - Preempted
- // In case 1, we may have won a race so we can't write to the state again.
- // In case 2, we lost the race so we now own the thread.
- // In case 3, we lost the race but can just reschedule the thread.
-
- if(unlikely(thrd_dst->preempted)) {
-     // The thread was preempted, reschedule it and reset the flag
-     ScheduleThread( thrd_dst );
-
-     // Just before returning to the processor, set the processor coroutine to active
-     proc_cor->state = Active;
-     return;
- }
-
- // set state of processor coroutine to active and the thread to inactive
- enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
- switch(old_state) {
-     case Halted:
-         // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
-         thrd_dst->state = Halted;
-         break;
-     case Active:
-         // This is case 1, the regular case, nothing more is needed
-         break;
-     case Rerun:
-         // This is case 2, the racy case, someone tried to run this thread before it finished blocking
-         // In this case, just run it again.
-         goto RUN;
-     case Reschedule:
-         // This is case 3, someone tried to run this before it finished blocking
-         // but it must go through the ready-queue
-         thrd_dst->state = Inactive; /*restore invariant */
-         ScheduleThread( thrd_dst );
-         break;
-     case Inactive:
-     case Start:
-     case Primed:
-     default:
-         // This makes no sense, something is wrong abort
-         abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
- }
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+     // We just finished running a thread, there are a few things that could have happened.
+     // 1 - Regular case : the thread has blocked and now one has scheduled it yet.
+     // 2 - Racy case : the thread has blocked but someone has already tried to schedule it.
+     // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way
+     // 4 - Preempted
+     // In case 1, we may have won a race so we can't write to the state again.
+     // In case 2, we lost the race so we now own the thread.
+     // In case 3, we lost the race but can just reschedule the thread.
+
+     if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
+         // The thread was preempted, reschedule it and reset the flag
+         ScheduleThread( thrd_dst );
+         break RUNNING;
+     }
+
+     // set state of processor coroutine to active and the thread to inactive
+     static_assert(sizeof(thrd_dst->state) == sizeof(int));
+     enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST);
+     switch(old_state) {
+         case Halted:
+             // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
+             thrd_dst->state = Halted;
+             break RUNNING;
+         case Active:
+             // This is case 1, the regular case, nothing more is needed
+             break RUNNING;
+         case Rerun:
+             // This is case 2, the racy case, someone tried to run this thread before it finished blocking
+             // In this case, just run it again.
+             continue RUNNING;
+         case Reschedule:
+             // This is case 3, someone tried to run this before it finished blocking
+             // but it must go through the ready-queue
+             thrd_dst->state = Inactive; /*restore invariant */
+             ScheduleThread( thrd_dst );
+             break RUNNING;
+         default:
+             // This makes no sense, something is wrong abort
+             abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state);
+     }
  }
…
  // KERNEL_ONLY
  static void returnToKernel() {
-     verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
      coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
      thread_desc * thrd_src = kernelTLS.this_thread;
…
  }

- verify( ! kernelTLS.preemption_state.enabled );
- }
-
- // KERNEL_ONLY
- // Once a thread has finished running, some of
- // its final actions must be executed from the kernel
- static void finishRunning(processor * this) with( this->finish ) {
-     verify( ! kernelTLS.preemption_state.enabled );
-     verify( action_code == No_Action );
-     choose( action_code ) {
-         case No_Action:
-             break;
-         case Release:
-             unlock( *lock );
-         case Schedule:
-             ScheduleThread( thrd );
-         case Release_Schedule:
-             unlock( *lock );
-             ScheduleThread( thrd );
-         case Release_Multi:
-             for(int i = 0; i < lock_count; i++) {
-                 unlock( *locks[i] );
-             }
-         case Release_Multi_Schedule:
-             for(int i = 0; i < lock_count; i++) {
-                 unlock( *locks[i] );
-             }
-             for(int i = 0; i < thrd_count; i++) {
-                 ScheduleThread( thrds[i] );
-             }
-         case Callback:
-             callback();
-         default:
-             abort("KERNEL ERROR: Unexpected action to run after thread");
-     }
+ /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
  }
…
  //-----------------------------------------------------------------------------
  // Scheduler routines
-
  // KERNEL ONLY
  void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
      /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
-     /* paranoid */ verifyf( thrd->state == Inactive || thrd->state == Start || thrd->preempted, "state : %d, preempted %d\n", thrd->state, thrd->preempted);
+     /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
+     /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
+                        "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
+     /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
+                        "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
+     /* paranoid */ #endif
      /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );
…
  // KERNEL ONLY
- thread_desc * nextThread(cluster * this) with( *this ) {
-     verify( ! kernelTLS.preemption_state.enabled );
+ static thread_desc * nextThread(cluster * this) with( *this ) {
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
      lock( ready_queue_lock __cfaabi_dbg_ctx2 );
      thread_desc * head = pop_head( ready_queue );
      unlock( ready_queue_lock );
-     verify( ! kernelTLS.preemption_state.enabled );
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
      return head;
  }

- void BlockInternal() {
+ void unpark( thread_desc * thrd, bool must_yield ) {
+     if( !thrd ) return;
+
+     enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
+
      disable_interrupts();
-     verify( ! kernelTLS.preemption_state.enabled );
+     static_assert(sizeof(thrd->state) == sizeof(int));
+     enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
+     switch(old_state) {
+         case Active:
+             // Wake won the race, the thread will reschedule/rerun itself
+             break;
+         case Inactive:
+             /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION );
+
+             // Wake lost the race,
+             thrd->state = Inactive;
+             ScheduleThread( thrd );
+             break;
+         case Rerun:
+         case Reschedule:
+             abort("More than one thread attempted to schedule thread %p\n", thrd);
+             break;
+         case Halted:
+         case Start:
+         case Primed:
+         default:
+             // This makes no sense, something is wrong abort
+             abort();
+     }
+     enable_interrupts( __cfaabi_dbg_ctx );
+ }
+
+ void park( void ) {
+     /* paranoid */ verify( kernelTLS.preemption_state.enabled );
+     disable_interrupts();
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION );
+
      returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
      enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- void BlockInternal( __spinlock_t * lock ) {
-     disable_interrupts();
-     unlock( *lock );
-
-     verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
-
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- void BlockInternal( thread_desc * thrd ) {
-     disable_interrupts();
-     WakeThread( thrd, false );
-
-     verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
-
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) {
-     disable_interrupts();
-     unlock( *lock );
-     WakeThread( thrd, false );
-
-     verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
-
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- void BlockInternal(__spinlock_t * locks [], unsigned short count) {
-     disable_interrupts();
-     for(int i = 0; i < count; i++) {
-         unlock( *locks[i] );
-     }
-
-     verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
-
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) {
-     disable_interrupts();
-     for(int i = 0; i < lock_count; i++) {
-         unlock( *locks[i] );
-     }
-     for(int i = 0; i < thrd_count; i++) {
-         WakeThread( thrds[i], false );
-     }
-
-     verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
-
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- void BlockInternal(__finish_callback_fptr_t callback) {
-     disable_interrupts();
-     callback();
-
-     verify( ! kernelTLS.preemption_state.enabled );
-     returnToKernel();
-     verify( ! kernelTLS.preemption_state.enabled );
-
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
-
- // KERNEL ONLY
- void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
-     verify( ! kernelTLS.preemption_state.enabled );
-     unlock( *lock );
-     WakeThread( thrd, false );
-
-     returnToKernel();
+     /* paranoid */ verify( kernelTLS.preemption_state.enabled );
+
+ }
+
+ // KERNEL ONLY
+ void LeaveThread() {
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     returnToKernel();
+ }
+
+ // KERNEL ONLY
+ bool force_yield( __Preemption_Reason reason ) {
+     /* paranoid */ verify( kernelTLS.preemption_state.enabled );
+     disable_interrupts();
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+
+     thread_desc * thrd = kernelTLS.this_thread;
+     /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
+
+     // SKULLDUGGERY: It is possible that we are preempting this thread just before
+     // it was going to park itself. If that is the case and it is already using the
+     // intrusive fields then we can't use them to preempt the thread
+     // If that is the case, abandon the preemption.
+     bool preempted = false;
+     if(thrd->next == 0p) {
+         preempted = true;
+         thrd->preempted = reason;
+         returnToKernel();
+     }
+
+     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+     enable_interrupts_noPoll();
+     /* paranoid */ verify( kernelTLS.preemption_state.enabled );
+
+     return preempted;
  }
…
      // atomically release spin lock and block
-     BlockInternal( &lock );
+     unlock( lock );
+     park();
  }
  else {
…
      // make new owner
-     WakeThread( thrd, false );
+     unpark( thrd );
  }
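The heart of the changeset is above: the zoo of BlockInternal overloads is replaced by park/unpark, and the block/wake race is settled by a single atomic exchange on the thread state; whichever side arrives second sees the first side's value and takes responsibility for the hand-off. The following is a compact C11 model of just that race resolution, not the CFA code itself: scheduling is reduced to a flag, and the Reschedule and preemption paths are left out:

#include <stdatomic.h>
#include <stdio.h>

enum state { ACTIVE, INACTIVE, RERUN };

typedef struct {
    _Atomic enum state state;
    int scheduled;                        // stands in for the ready-queue push
} thread_model;

// Waker side (unpark): publish RERUN, then inspect what was displaced.
static void unpark(thread_model * t) {
    enum state old = atomic_exchange(&t->state, RERUN);
    switch (old) {
    case ACTIVE:
        // Thread has not finished blocking: it will see RERUN and keep running.
        break;
    case INACTIVE:
        // Thread fully blocked: the waker owns it, restore state and requeue.
        t->state = INACTIVE;
        t->scheduled = 1;
        break;
    case RERUN:
        fprintf(stderr, "double unpark\n");
        break;
    }
}

// Kernel side after the thread stops running (park): publish INACTIVE.
static void park(thread_model * t) {
    enum state old = atomic_exchange(&t->state, INACTIVE);
    switch (old) {
    case ACTIVE:
        // No wake arrived: the thread stays blocked until an unpark wins.
        break;
    case RERUN:
        // An unpark raced ahead: do not block, mark the thread runnable again.
        t->state = ACTIVE;
        break;
    case INACTIVE:
        fprintf(stderr, "double park\n");
        break;
    }
}

int main(void) {
    thread_model t = { ACTIVE, 0 };
    unpark(&t);     // wake arrives first...
    park(&t);       // ...so park sees RERUN and keeps the thread runnable
    printf("state=%d scheduled=%d\n", (int)t.state, t.scheduled); // state=0 (ACTIVE)
}

This is why exactly one unpark may target a parked thread at a time: a second exchange would displace RERUN and the real code aborts with "More than one thread attempted to schedule thread".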
libcfa/src/concurrency/kernel_private.hfa
r9f575ea → r3381ed7

  void ScheduleThread( thread_desc * ) __attribute__((nonnull (1)));
- static inline void WakeThread( thread_desc * thrd, bool must_yield ) {
-     if( !thrd ) return;
-
-     enum coroutine_state new_state = must_yield ? Reschedule : Rerun;
-
-     disable_interrupts();
-     static_assert(sizeof(thrd->state) == sizeof(int));
-     enum coroutine_state old_state = (enum coroutine_state)__atomic_exchange_n((volatile int *)&thrd->state, (int)new_state, __ATOMIC_SEQ_CST);
-     switch(old_state) {
-         case Active:
-             // Wake won the race, the thread will reschedule/rerun itself
-             break;
-         case Inactive:
-             // Wake lost the race,
-             thrd->state = Inactive;
-             ScheduleThread( thrd );
-             break;
-         case Rerun:
-         case Reschedule:
-             abort("More than one thread attempted to schedule thread %p\n", thrd);
-             break;
-         case Halted:
-         case Start:
-         case Primed:
-         default:
-             // This makes no sense, something is wrong abort
-             abort();
-     }
-     enable_interrupts( __cfaabi_dbg_ctx );
- }
- thread_desc * nextThread(cluster * this);

  //Block current thread and release/wake-up the following resources
- void BlockInternal(void);
- void BlockInternal(__spinlock_t * lock);
- void BlockInternal(thread_desc * thrd);
- void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
- void BlockInternal(__spinlock_t * locks [], unsigned short count);
- void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
- void BlockInternal(__finish_callback_fptr_t callback);
- void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
+ void LeaveThread();
+
+ bool force_yield( enum __Preemption_Reason );

  //-----------------------------------------------------------------------------
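The private header now exports only ScheduleThread, LeaveThread, and force_yield; everything the seven BlockInternal overloads used to bundle (unlock one lock, unlock N locks, wake one thread, wake N threads, run a callback, then block) is written out at the call site. A sketch of the caller-side mapping in C; the types and bodies here are placeholder stubs so the mapping compiles standalone, not the CFA runtime:

#include <stddef.h>

// Placeholder types and no-op primitives.
typedef struct spinlock    spinlock;
typedef struct thread_desc thread_desc;
static void unlock( spinlock * l )    { (void)l; }
static void park( void )              {}
static void unpark( thread_desc * t ) { (void)t; }

// Old: BlockInternal( &lock );          release one lock, then block
static void wait_releasing( spinlock * l ) {
    unlock( l );
    park();                  // interrupt masking happens inside park()
}

// Old: BlockInternal( &lock, thrd );    release, wake a successor, then block
static void handoff_then_wait( spinlock * l, thread_desc * successor ) {
    unlock( l );
    unpark( successor );     // safe before park(): the state exchange
    park();                  // remembers a wake that arrives early
}

// Old: BlockInternal( locks, n, thrds, m );  the N-lock/M-thread variant
static void multi_handoff_then_wait( spinlock * locks[], size_t n,
                                     thread_desc * thrds[], size_t m ) {
    for( size_t i = 0; i < n; i++ ) unlock( locks[i] );
    for( size_t i = 0; i < m; i++ ) unpark( thrds[i] );
    park();
}

int main( void ) {
    wait_releasing( 0 );
    handoff_then_wait( 0, 0 );
    multi_handoff_then_wait( 0, 0, 0, 0 );
}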
libcfa/src/concurrency/monitor.cfa
r9f575ea → r3381ed7

  //-----------------------------------------------------------------------------
  // Forward declarations
- static inline void set_owner ( monitor_desc * this, thread_desc * owner );
- static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner );
+ static inline void set_owner ( monitor_desc * this, thread_desc * owner, enum __Owner_Reason );
+ static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc * owner, enum __Owner_Reason );
  static inline void set_mask  ( monitor_desc * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
  static inline void reset_mask( monitor_desc * this );

- static inline thread_desc * next_thread( monitor_desc * this );
+ static inline thread_desc * next_thread( monitor_desc * this, enum __Owner_Reason );
  static inline bool is_accepted( monitor_desc * this, const __monitor_group_t & monitors );
…
  if( !this->owner ) {
      // No one has the monitor, just take it
-     set_owner( this, thrd );
+     set_owner( this, thrd, __ENTER_FREE );

      __cfaabi_dbg_print_safe( "Kernel : mon is free \n" );
…
  else if( is_accepted( this, group) ) {
      // Some one was waiting for us, enter
-     set_owner( this, thrd );
+     set_owner( this, thrd, __ENTER_ACCEPT );

      // Reset mask
…

  // Some one else has the monitor, wait in line for it
+ /* paranoid */ verify( thrd->next == 0p );
  append( this->entry_queue, thrd );
- BlockInternal( &this->lock );
+ /* paranoid */ verify( thrd->next == 1p );
+
+ unlock( this->lock );
+ park();

  __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);

- // BlockInternal will unlock spinlock, no need to unlock ourselves
+ /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
  return;
  }

  __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this);
+
+ /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+ /* paranoid */ verify( this->lock.lock );

  // Release the lock and leave
…

  // No one has the monitor, just take it
- set_owner( this, thrd );
+ set_owner( this, thrd, __ENTER_DTOR_FREE );
+
+ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );

  unlock( this->lock );
…
  // Wake the thread that is waiting for this
  __condition_criterion_t * urgent = pop( this->signal_stack );
- verify( urgent );
+ /* paranoid */ verify( urgent );

  // Reset mask
…

  // Some one else has the monitor, wait for him to finish and then run
- BlockInternal( &this->lock, urgent->owner->waiting_thread );
+ unlock( this->lock );
+
+ // Release the next thread
+ /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+ unpark( urgent->owner->waiting_thread );
+
+ // Park current thread waiting
+ park();

  // Some one was waiting for us, enter
- set_owner( this, thrd );
+ /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
  }
  else {
…

  // Some one else has the monitor, wait in line for it
+ /* paranoid */ verify( thrd->next == 0p );
  append( this->entry_queue, thrd );
- BlockInternal( &this->lock );
-
- // BlockInternal will unlock spinlock, no need to unlock ourselves
+ /* paranoid */ verify( thrd->next == 1p );
+ unlock( this->lock );
+
+ // Park current thread waiting
+ park();
+
+ /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
  return;
  }
…
  __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner);

- verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+ /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );

  // Leaving a recursion level, decrement the counter
…

  // Get the next thread, will be null on low contention monitor
- thread_desc * new_owner = next_thread( this );
+ thread_desc * new_owner = next_thread( this, __LEAVE );
+
+ // Check the new owner is consistent with who we wake-up
+ // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor
+ /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );

  // We can now let other threads in safely
…
  //We need to wake-up the thread
- WakeThread( new_owner, false );
+ /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+ unpark( new_owner );
…
  thrd->self_cor.state = Halted;

- verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
+ /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );

  // Leaving a recursion level, decrement the counter
…

  // Fetch the next thread, can be null
- thread_desc * new_owner = next_thread( this );
+ thread_desc * new_owner = next_thread( this, __LEAVE_THREAD );
+
+ // Release the monitor lock
+ unlock( this->lock );
+
+ // Unpark the next owner if needed
+ /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
+ unpark( new_owner );

  // Leave the thread, this will unlock the spinlock
- // Use leave thread instead of BlockInternal which is
- // specialized for this case and supports null new_owner
- LeaveThread( &this->lock, new_owner );
+ // Use leave thread instead of park which is
+ // specialized for this case
+ LeaveThread();

  // Control flow should never reach here!
…
  // Append the current wait operation to the ones already queued on the condition
  // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion
+ /* paranoid */ verify( waiter.next == 0p );
  append( this.blocked, &waiter );
+ /* paranoid */ verify( waiter.next == 1p );

  // Lock all monitors (aggregates the locks as well)
…
  // Remove any duplicate threads
  for( __lock_size_t i = 0; i < count; i++) {
-     thread_desc * new_owner = next_thread( monitors[i] );
+     thread_desc * new_owner = next_thread( monitors[i], __WAIT );
      insert_unique( threads, thread_count, new_owner );
  }

+ // Unlock the locks, we don't need them anymore
+ for(int i = 0; i < count; i++) {
+     unlock( *locks[i] );
+ }
+
+ // Wake the threads
+ for(int i = 0; i < thread_count; i++) {
+     unpark( threads[i] );
+ }
+
  // Everything is ready to go to sleep
- BlockInternal( locks, count, threads, thread_count );
+ park();

  // We are back, restore the owners and recursions
…
  //Find the thread to run
  thread_desc * signallee = pop_head( this.blocked )->waiting_thread;
- set_owner( monitors, count, signallee );
+ /* paranoid */ verify( signallee->next == 0p );
+ set_owner( monitors, count, signallee, __ENTER_SIGNAL_BLOCK );

  __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee );

+ // unlock all the monitors
+ unlock_all( locks, count );
+
+ // unpark the thread we signalled
+ unpark( signallee );
+
  //Everything is ready to go to sleep
- BlockInternal( locks, count, &signallee, 1 );
+ park();
…

  // Set the owners to be the next thread
- set_owner( monitors, count, next );
-
- // Everything is ready to go to sleep
- BlockInternal( locks, count, &next, 1 );
+ set_owner( monitors, count, next, __WAITFOR );
+
+ // unlock all the monitors
+ unlock_all( locks, count );
+
+ // unpark the thread we signalled
+ unpark( next );
+
+ //Everything is ready to go to sleep
+ park();

  // We are back, restore the owners and recursions
…
  }

+ // unlock all the monitors
+ unlock_all( locks, count );
+
  //Everything is ready to go to sleep
- BlockInternal( locks, count );
+ park();
…
  // Utilities

- static inline void set_owner( monitor_desc * this, thread_desc * owner ) {
-     // __cfaabi_dbg_print_safe( "Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner);
+ static inline void set_owner( monitor_desc * this, thread_desc * owner, enum __Owner_Reason reason ) {
+     /* paranoid */ verify( this->lock.lock );

      //Pass the monitor appropriately
      this->owner = owner;
+     this->owner_reason = reason;

      //We are passing the monitor to someone else, which means recursion level is not 0
…
  }

- static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) {
-     monitors[0]->owner = owner;
-     monitors[0]->recursion = 1;
+ static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner, enum __Owner_Reason reason ) {
+     /* paranoid */ verify ( monitors[0]->lock.lock );
+     /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] );
+     monitors[0]->owner        = owner;
+     monitors[0]->owner_reason = reason;
+     monitors[0]->recursion    = 1;
      for( __lock_size_t i = 1; i < count; i++ ) {
-         monitors[i]->owner = owner;
-         monitors[i]->recursion = 0;
+         /* paranoid */ verify ( monitors[i]->lock.lock );
+         /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] );
+         monitors[i]->owner        = owner;
+         monitors[i]->owner_reason = reason;
+         monitors[i]->recursion    = 0;
      }
  }
…
  }

- static inline thread_desc * next_thread( monitor_desc * this ) {
+ static inline thread_desc * next_thread( monitor_desc * this, enum __Owner_Reason reason ) {
      //Check the signaller stack
      __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top);
…
      //regardless of if we are ready to baton pass,
      //we need to set the monitor as in use
-     set_owner( this, urgent->owner->waiting_thread );
+     /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+     set_owner( this, urgent->owner->waiting_thread, reason );

      return check_condition( urgent );
…
  // Get the next thread in the entry_queue
  thread_desc * new_owner = pop_head( this->entry_queue );
- set_owner( this, new_owner );
+ /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );
+ /* paranoid */ verify( !new_owner || new_owner->next == 0p );
+ set_owner( this, new_owner, reason );

  return new_owner;
…
  // For each thread in the entry-queue
  for( thread_desc ** thrd_it = &entry_queue.head;
-     *thrd_it;
+     *thrd_it != 1p;
      thrd_it = &(*thrd_it)->next
  ) {
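Every rewritten monitor path above follows the same baton-pass discipline: choose and record the new owner while the monitor spinlock is held, release the spinlock, unpark the chosen thread, and only then park or return. Because ownership is written before the wake, the woken thread's verifyf( kernelTLS.this_thread == this->owner, ... ) assertions hold. A stripped-down C sketch of that ordering; the types and helpers are stand-ins, and the real next_thread pops the signaller stack or entry queue:

#include <stdio.h>

typedef struct thread thread;           // opaque stand-in

typedef struct {
    int      lock;                      // models the monitor spinlock
    thread * owner;
    int      recursion;
} monitor;

static void spin_lock( monitor * m )   { m->lock = 1; }   // stub
static void spin_unlock( monitor * m ) { m->lock = 0; }   // stub
static void unpark( thread * t )       { if( !t ) return; /* wake t */ }

// Stub: the real version pops the signaller stack / entry queue and
// writes m->owner (and owner_reason) while m->lock is still held.
static thread * next_thread( monitor * m ) { m->owner = 0; return 0; }

static void leave( monitor * m ) {
    spin_lock( m );
    if( --m->recursion != 0 ) {            // still nested: keep ownership
        spin_unlock( m );
        return;
    }
    thread * new_owner = next_thread( m ); // 1. pick owner under the lock
    spin_unlock( m );                      // 2. release the spinlock
    unpark( new_owner );                   // 3. wake last; unpark(0) is a no-op
}

int main( void ) {
    monitor m = { 0, 0, 1 };
    leave( &m );
    printf( "owner=%p recursion=%d\n", (void*)m.owner, m.recursion );
}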
libcfa/src/concurrency/monitor.hfa
r9f575ea → r3381ed7

  signal_stack{};
  owner = 0p;
+ owner_reason = __NO_OWNER;
  recursion = 0;
  mask.accepted = 0p;
…
  bool signal      ( condition & this );
  bool signal_block( condition & this );
- static inline bool is_empty ( condition & this ) { return !this.blocked.head; }
+ static inline bool is_empty ( condition & this ) { return this.blocked.head == 1p; }
  uintptr_t front( condition & this );
libcfa/src/concurrency/mutex.cfa
r9f575ea → r3381ed7

  if( is_locked ) {
      append( blocked_threads, kernelTLS.this_thread );
-     BlockInternal( &lock );
+     unlock( lock );
+     park();
  }
  else {
…
  lock( this.lock __cfaabi_dbg_ctx2 );
  this.is_locked = (this.blocked_threads != 0);
- WakeThread(
-     pop_head( this.blocked_threads ), false
+ unpark(
+     pop_head( this.blocked_threads )
  );
  unlock( this.lock );
…
  else {
      append( blocked_threads, kernelTLS.this_thread );
-     BlockInternal( &lock );
+     unlock( lock );
+     park();
  }
…
  owner = thrd;
  recursion_count = (thrd ? 1 : 0);
- WakeThread( thrd, false );
+ unpark( thrd );
  }
  unlock( lock );
…
  void notify_one(condition_variable & this) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
-     WakeThread(
-         pop_head( this.blocked_threads ), false
+     unpark(
+         pop_head( this.blocked_threads )
      );
      unlock( lock );
…
  lock( lock __cfaabi_dbg_ctx2 );
  while(this.blocked_threads) {
-     WakeThread(
-         pop_head( this.blocked_threads ), false
+     unpark(
+         pop_head( this.blocked_threads )
      );
  }
…
  lock( this.lock __cfaabi_dbg_ctx2 );
  append( this.blocked_threads, kernelTLS.this_thread );
- BlockInternal( &this.lock );
+ unlock( this.lock );
+ park();
  }
…
  lock( this.lock __cfaabi_dbg_ctx2 );
  append( this.blocked_threads, kernelTLS.this_thread );
- void __unlock(void) {
-     unlock(l);
-     unlock(this.lock);
- }
- BlockInternal( __unlock );
+ unlock(l);
+ unlock(this.lock);
+ park();
  lock(l);
  }
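What makes the simple "unlock( lock ); park();" shape above safe is that an unpark aimed at a thread that has not parked yet is remembered, so the classic lost-wakeup window between releasing the internal lock and blocking disappears (which is also why the old __unlock callback passed to BlockInternal is no longer needed). A POSIX demonstration of the same guarantee, emulating park/unpark with a binary semaphore; CFA's real mechanism is the state exchange shown earlier, not a semaphore:

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t parked;                     // one permit = one remembered unpark

static void park( void )   { sem_wait( &parked ); }
static void unpark( void ) { sem_post( &parked ); }

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int ready = 0;

static void * waiter( void * arg ) {
    (void)arg;
    pthread_mutex_lock( &lock );
    while( !ready ) {
        pthread_mutex_unlock( &lock );   // release the lock...
        park();                          // ...then block; an unpark landing in
        pthread_mutex_lock( &lock );     // the gap is remembered, not lost
    }
    pthread_mutex_unlock( &lock );
    puts( "woken without a lost wakeup" );
    return 0;
}

int main( void ) {
    sem_init( &parked, 0, 0 );
    pthread_t t;
    pthread_create( &t, 0, waiter, 0 );
    pthread_mutex_lock( &lock );
    ready = 1;
    pthread_mutex_unlock( &lock );
    unpark();                            // may beat the waiter's park(): still safe
    pthread_join( t, 0 );
}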
libcfa/src/concurrency/preemption.cfa
r9f575ea → r3381ed7

  void enable_interrupts( __cfaabi_dbg_ctx_param ) {
      processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
-     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

      with( kernelTLS.preemption_state ){
…
      if( proc->pending_preemption ) {
          proc->pending_preemption = false;
-         BlockInternal( thrd );
+         force_yield( __POLL_PREEMPTION );
      }
  }
…
  // Preemption can occur here

- kernelTLS.this_thread->preempted = true;
- BlockInternal(); // Do the actual CtxSwitch
+ force_yield( __ALARM_PREEMPTION ); // Do the actual CtxSwitch
  }
libcfa/src/concurrency/thread.cfa
r9f575ea → r3381ed7

  self_cor{ name, storage, storageSize };
  state = Start;
- preempted = false;
+ preempted = __NO_PREEMPTION;
  curr_cor = &self_cor;
  self_mon.owner = &this;
…
  void __thrd_start( T & this, void (*main_p)(T &) ) {
      thread_desc * this_thrd = get_thread(this);
-     thread_desc * curr_thrd = TL_GET( this_thread );

      disable_interrupts();
…
      this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
      verify( this_thrd->context.SP );
-     // CtxSwitch( &curr_thrd->context, &this_thrd->context );

      ScheduleThread(this_thrd);
      enable_interrupts( __cfaabi_dbg_ctx );
  }
-
- void yield( void ) {
-     // Safety note : This could cause some false positives due to preemption
-     verify( TL_GET( preemption_state.enabled ) );
-     BlockInternal( TL_GET( this_thread ) );
-     // Safety note : This could cause some false positives due to preemption
-     verify( TL_GET( preemption_state.enabled ) );
- }
-
- void yield( unsigned times ) {
-     for( unsigned i = 0; i < times; i++ ) {
-         yield();
-     }
- }
libcfa/src/concurrency/thread.hfa
r9f575ea → r3381ed7

  void ^?{}( scoped(T)& this );

- void yield();
- void yield( unsigned times );
-
- static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
+ //-----------------------------------------------------------------------------
+ // Thread getters
+ static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
+
+ //-----------------------------------------------------------------------------
+ // Scheduler API
+
+ //----------
+ // Park thread: block until corresponding call to unpark, won't block if unpark is already called
+ void park( void );
+
+ //----------
+ // Unpark a thread, if the thread is already blocked, schedule it
+ // if the thread is not yet block, signal that it should rerun immediately or reschedule itself
+ void unpark( thread_desc * this, bool must_yield );
+
+ static inline void unpark( thread_desc * this ) { unpark( this, false ); }
+
+ forall( dtype T | is_thread(T) )
+ static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ), false );}
+
+ forall( dtype T | is_thread(T) )
+ static inline void unpark( T & this, bool must_yield ) { if(!&this) return; unpark( get_thread( this ), must_yield );}
+
+ //----------
+ // Yield: force thread to block and be rescheduled
+ static inline void yield() {
+     unpark( active_thread(), true );
+     park();
+ }
+
+ // Yield: yield N times
+ static inline void yield( unsigned times ) {
+     for( times ) {
+         yield();
+     }
+ }

  // Local Variables: //
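With park/unpark in place, yield stops being a runtime primitive: unpark( active_thread(), true ) stamps the thread Reschedule, and the kernel-side exchange in runThread sees that stamp and pushes the thread through the ready-queue instead of leaving it blocked. Extending the earlier two-state C model with that third state; this is again a model, with the ready-queue reduced to a flag:

#include <stdatomic.h>
#include <stdio.h>

enum state { ACTIVE, INACTIVE, RERUN, RESCHEDULE };

static _Atomic enum state st = ACTIVE;
static int requeued = 0;                 // stands in for ScheduleThread

// yield = unpark(self, must_yield = true) followed by park().
static void yield_model(void) {
    atomic_exchange(&st, RESCHEDULE);    // unpark(self, true): stamp the intent
    enum state old = atomic_exchange(&st, INACTIVE); // kernel-side exchange in park
    if (old == RESCHEDULE) {
        st = INACTIVE;                   // restore the invariant...
        requeued = 1;                    // ...and go through the ready-queue
    }
}

int main(void) {
    yield_model();
    printf("requeued=%d\n", requeued);   // prints requeued=1
}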
tests/concurrent/examples/.expect/datingService.txt
r9f575ea → r3381ed7

- Girl:17 is dating Boy at 2 with ccode 17
- Boy:2 is dating Girl 17 with ccode 17
- Boy:14 is dating Girl 5 with ccode 5
- Girl:5 is dating Boy at 14 with ccode 5
- Boy:9 is dating Girl 10 with ccode 10
- Girl:10 is dating Boy at 9 with ccode 10
- Boy:1 is dating Girl 18 with ccode 18
- Girl:18 is dating Boy at 1 with ccode 18
- Boy:16 is dating Girl 3 with ccode 3
- Girl:3 is dating Boy at 16 with ccode 3
- Boy:5 is dating Girl 14 with ccode 14
- Girl:14 is dating Boy at 5 with ccode 14
- Boy:15 is dating Girl 4 with ccode 4
- Girl:4 is dating Boy at 15 with ccode 4
- Girl:0 is dating Boy at 19 with ccode 0
- Boy:19 is dating Girl 0 with ccode 0
- Girl:9 is dating Boy at 10 with ccode 9
- Boy:10 is dating Girl 9 with ccode 9
- Girl:11 is dating Boy at 8 with ccode 11
- Boy:8 is dating Girl 11 with ccode 11
- Boy:12 is dating Girl 7 with ccode 7
- Girl:7 is dating Boy at 12 with ccode 7
- Boy:11 is dating Girl 8 with ccode 8
- Girl:8 is dating Boy at 11 with ccode 8
- Girl:16 is dating Boy at 3 with ccode 16
- Boy:3 is dating Girl 16 with ccode 16
- Girl:15 is dating Boy at 4 with ccode 15
- Boy:4 is dating Girl 15 with ccode 15
- Girl:19 is dating Boy at 0 with ccode 19
- Boy:0 is dating Girl 19 with ccode 19
- Girl:2 is dating Boy at 17 with ccode 2
- Boy:17 is dating Girl 2 with ccode 2
- Boy:13 is dating Girl 6 with ccode 6
- Girl:6 is dating Boy at 13 with ccode 6
- Boy:7 is dating Girl 12 with ccode 12
- Girl:12 is dating Boy at 7 with ccode 12
- Girl:13 is dating Boy at 6 with ccode 13
- Boy:6 is dating Girl 13 with ccode 13
- Girl:1 is dating Boy at 18 with ccode 1
- Boy:18 is dating Girl 1 with ccode 1
tests/concurrent/examples/datingService.cfa
r9f575ea → r3381ed7

  //
  // Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
- // 
+ //
  // The contents of this file are covered under the licence agreement in the
  // file "LICENCE" distributed with Cforall.
…
      signal_block( Boys[ccode] );            // restart boy to set phone number
  } // if
- sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
+ //sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
  return BoyPhoneNo;
  } // DatingService girl
…
      signal_block( Girls[ccode] );           // restart girl to set phone number
  } // if
- sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
+ //sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
  return GirlPhoneNo;
  } // DatingService boy
tests/concurrent/multi-monitor.cfa
r9f575ea → r3381ed7

  void increment( monitor_t & mutex p1, monitor_t & mutex p2, int & value ) {
+     assert(active_thread() == get_monitor(p1)->owner);
+     assert(active_thread() == get_monitor(p2)->owner);
      value += 1;
+     assert(active_thread() == get_monitor(p1)->owner);
+     assert(active_thread() == get_monitor(p2)->owner);
  }