Changeset b0c7419 for libcfa/src
- Timestamp: Feb 14, 2020, 3:28:17 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 8c50aed
- Parents: 50b8885
- Location: libcfa/src/concurrency
- Files: 6 edited
libcfa/src/concurrency/invoke.h (r50b8885 → rb0c7419)

@@ -92,6 +92,6 @@
 };

-enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun, Reschedule };
-enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION };
+enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };
+enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };

 struct coroutine_desc {
libcfa/src/concurrency/kernel.cfa (r50b8885 → rb0c7419)

@@ -213,4 +213,5 @@
 	this.cltr = &cltr;
 	terminated{ 0 };
+	destroyer = 0p;
 	do_terminate = false;
 	preemption_alarm = 0p;
@@ -320,7 +321,4 @@
 	coroutine_desc * proc_cor = get_coroutine(this->runner);

-	// Reset the terminating actions here
-	this->finish.action_code = No_Action;
-
 	// Update global state
 	kernelTLS.this_thread = thrd_dst;
@@ -334,5 +332,5 @@
 	if(unlikely(thrd_dst->preempted)) {
 		thrd_dst->preempted = __NO_PREEMPTION;
-		verify(thrd_dst->state == Active || thrd_dst->state == Rerun || thrd_dst->state == Reschedule);
+		verify(thrd_dst->state == Active || thrd_dst->state == Rerun);
 	} else {
 		verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive);
@@ -372,4 +370,8 @@
 			// The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on
 			thrd_dst->state = Halted;
+
+			// We may need to wake someone up here since
+			unpark( this->destroyer );
+			this->destroyer = 0p;
 			break RUNNING;
 		case Active:
@@ -380,10 +382,4 @@
 			// In this case, just run it again.
 			continue RUNNING;
-		case Reschedule:
-			// This is case 3, someone tried to run this before it finished blocking
-			// but it must go through the ready-queue
-			thrd_dst->state = Inactive;  /* restore invariant */
-			ScheduleThread( thrd_dst );
-			break RUNNING;
 		default:
 			// This makes no sense, something is wrong abort
@@ -397,5 +393,5 @@

 // KERNEL_ONLY
-static void returnToKernel() {
+void returnToKernel() {
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner);
@@ -553,5 +549,5 @@
 	/* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION,
 			"Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted );
-	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule,
+	/* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun,
 			"Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted );
 	/* paranoid */ #endif
@@ -589,12 +585,10 @@
 }

-void unpark( thread_desc * thrd, bool must_yield ) {
+void unpark( thread_desc * thrd ) {
 	if( !thrd ) return;
-
-	enum coroutine_state new_state = must_yield ? Reschedule : Rerun;

 	disable_interrupts();
 	static_assert(sizeof(thrd->state) == sizeof(int));
-	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, new_state, __ATOMIC_SEQ_CST);
+	enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST);
 	switch(old_state) {
 		case Active:
@@ -609,5 +603,4 @@
 			break;
 		case Rerun:
-		case Reschedule:
 			abort("More than one thread attempted to schedule thread %p\n", thrd);
 			break;
@@ -637,7 +630,8 @@

 // KERNEL ONLY
-void LeaveThread() {
+void __leave_thread() {
 	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
 	returnToKernel();
+	abort();
 }

@@ -649,5 +643,5 @@

 	thread_desc * thrd = kernelTLS.this_thread;
-	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun || thrd->state == Reschedule);
+	/* paranoid */ verify(thrd->state == Active || thrd->state == Rerun);

 	// SKULLDUGGERY: It is possible that we are preempting this thread just before
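The heart of this change is the two-way race between a thread parking itself and another thread unparking it, now resolved with a single atomic exchange on the thread's state (the Reschedule case is gone). The following standalone C11 program is a hypothetical, simplified sketch of that pattern — the names try_park and unpark_sketch are not the CFA API, and the real kernel performs the parker's side across a context switch rather than in one function:

	/* Simplified sketch of the park/unpark race (hypothetical names).
	 * Both sides atomically exchange the shared state; whoever runs
	 * second sees the other side's value and reacts accordingly. */
	#include <stdatomic.h>
	#include <stdio.h>

	enum { Inactive, Active, Rerun };

	static _Atomic int state = Active;

	/* Parker side: the thread publishes that it is blocking. */
	static int try_park(void) {
		int old = atomic_exchange(&state, Inactive);
		if(old == Rerun) {                /* a wake-up was already published: */
			atomic_store(&state, Active); /* cancel the park, keep running */
			return 0;
		}
		return 1;                         /* really blocked; waker must reschedule us */
	}

	/* Waker side: publish the wake-up, then check what the thread was doing. */
	static int unpark_sketch(void) {
		int old = atomic_exchange(&state, Rerun);
		return old == Inactive;           /* 1 => thread is blocked, push it on the ready queue */
	}

	int main(void) {
		if(unpark_sketch()) puts("thread was blocked: reschedule it");
		if(!try_park())     puts("wake-up arrived first: keep running");
		return 0;
	}

Because the exchange totally orders the two sides, only three outcomes are possible, matching the switch in the kernel's unpark: the thread was still Active (it will see Rerun and not block), it was Inactive (the waker reschedules it), or it was already Rerun (a double wake-up, which the kernel treats as an error and aborts).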
libcfa/src/concurrency/kernel.hfa (r50b8885 → rb0c7419)

@@ -45,53 +45,4 @@
 extern struct cluster * mainCluster;

-enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
-
-typedef void (*__finish_callback_fptr_t)(void);
-
-//TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
-struct FinishAction {
-	FinishOpCode action_code;
-	/*
-	// Union of possible actions
-	union {
-		// Option 1 : locks and threads
-		struct {
-			// 1 thread or N thread
-			union {
-				thread_desc * thrd;
-				struct {
-					thread_desc ** thrds;
-					unsigned short thrd_count;
-				};
-			};
-			// 1 lock or N lock
-			union {
-				__spinlock_t * lock;
-				struct {
-					__spinlock_t ** locks;
-					unsigned short lock_count;
-				};
-			};
-		};
-		// Option 2 : action pointer
-		__finish_callback_fptr_t callback;
-	};
-	/*/
-	thread_desc * thrd;
-	thread_desc ** thrds;
-	unsigned short thrd_count;
-	__spinlock_t * lock;
-	__spinlock_t ** locks;
-	unsigned short lock_count;
-	__finish_callback_fptr_t callback;
-	//*/
-};
-static inline void ?{}(FinishAction & this) {
-	this.action_code = No_Action;
-	this.thrd = 0p;
-	this.lock = 0p;
-}
-static inline void ^?{}(FinishAction &) {}
-
 // Processor
 coroutine processorCtx_t {
@@ -116,5 +67,5 @@
 	// RunThread data
 	// Action to do after a thread is ran
-	struct FinishAction finish;
+	thread_desc * destroyer;

 	// Preemption data
libcfa/src/concurrency/kernel_private.hfa (r50b8885 → rb0c7419)

@@ -34,7 +34,5 @@

 //Block current thread and release/wake-up the following resources
-void LeaveThread();
-
-bool force_yield( enum __Preemption_Reason );
+void __leave_thread() __attribute__((noreturn));

 //-----------------------------------------------------------------------------
libcfa/src/concurrency/monitor.cfa (r50b8885 → rb0c7419)

@@ -277,5 +277,5 @@
 	disable_interrupts();

-	thrd->self_cor.state = Halted;
+	thrd->state = Halted;

 	/* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );
@@ -296,10 +296,12 @@
 	// Unpark the next owner if needed
 	/* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this );
-	unpark( new_owner );
-
-	// Leave the thread, this will unlock the spinlock
-	// Use leave thread instead of park which is
-	// specialized for this case
-	LeaveThread();
+	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
+	/* paranoid */ verify( ! kernelTLS.this_processor->destroyer );
+	/* paranoid */ verify( thrd->state == Halted );
+
+	kernelTLS.this_processor->destroyer = new_owner;
+
+	// Leave the thread
+	__leave_thread();

 	// Control flow should never reach here!
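The replacement code defers the wake-up: a dying thread must not unpark its joiner while still executing on its own stack, because the joiner may immediately reclaim that stack. Instead, the joiner is stashed in the processor's destroyer field and woken by the kernel only after the final context switch (the unpark( this->destroyer ) in kernel.cfa's Halted case above). A standalone sketch of that hand-off, with hypothetical names:

	/* Sketch of the destroyer hand-off (hypothetical names, not the CFA API):
	 * the terminating thread records its joiner on the processor, and the
	 * kernel performs the wake-up once the dying stack is no longer in use. */
	#include <stdio.h>

	struct thread    { const char * name; };
	struct processor { struct thread * destroyer; };

	static void wake(struct thread * t) {
		if(t) printf("unpark %s\n", t->name);
	}

	/* Runs on the dying thread, just before the final context switch. */
	static void leave_thread(struct processor * proc, struct thread * joiner) {
		proc->destroyer = joiner;   /* defer the wake-up to the kernel */
		/* ... final context switch into the kernel happens here ... */
	}

	/* Runs in the kernel after it has switched off the dying thread's stack. */
	static void kernel_after_halt(struct processor * proc) {
		wake(proc->destroyer);
		proc->destroyer = 0;
	}

	int main(void) {
		struct processor proc = { 0 };
		struct thread joiner = { "joiner" };
		leave_thread(&proc, &joiner);
		kernel_after_halt(&proc);   /* prints: unpark joiner */
		return 0;
	}

This is also why the whole FinishAction machinery in kernel.hfa could be deleted: the deferred wake-up of a joiner was the only post-switch action still needed, so a single thread pointer per processor suffices.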
libcfa/src/concurrency/thread.hfa (r50b8885 → rb0c7419)

@@ -101,20 +101,16 @@
 //----------
 // Unpark a thread, if the thread is already blocked, schedule it
-// if the thread is not yet block, signal that it should rerun immediately or reschedule itself
-void unpark( thread_desc * this, bool must_yield );
-
-static inline void unpark( thread_desc * this ) { unpark( this, false ); }
+// if the thread is not yet block, signal that it should rerun immediately
+void unpark( thread_desc * this );

 forall( dtype T | is_thread(T) )
-static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ), false );}
-
-forall( dtype T | is_thread(T) )
-static inline void unpark( T & this, bool must_yield ) { if(!&this) return; unpark( get_thread( this ), must_yield );}
+static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}

 //----------
 // Yield: force thread to block and be rescheduled
+bool force_yield( enum __Preemption_Reason );
+
 static inline void yield() {
-	unpark( active_thread(), true );
-	park();
+	force_yield(__MANUAL_PREEMPTION);
 }
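With this change, yield is expressed as a self-preemption rather than an unpark-then-park pair: the thread flags itself with __MANUAL_PREEMPTION and blocks, and the kernel's preempted-thread path (the thrd_dst->preempted branch in kernel.cfa above) requeues it instead of leaving it blocked. A standalone sketch of that control flow, with hypothetical names and the context switch elided:

	/* Sketch of yield-as-self-preemption (hypothetical names, not the CFA API).
	 * The preempted flag tells the kernel the thread blocked involuntarily,
	 * so it goes straight back on the ready queue. */
	#include <stdio.h>

	enum reason { NO_PREEMPTION, MANUAL_PREEMPTION };

	struct thread { enum reason preempted; };

	static void schedule(struct thread * t) { (void)t; puts("thread requeued"); }

	/* Runs in the kernel after switching away from the thread. */
	static void kernel_after_switch(struct thread * t) {
		if(t->preempted != NO_PREEMPTION) { /* preempted, not parked: requeue */
			t->preempted = NO_PREEMPTION;
			schedule(t);
		}
	}

	static void force_yield(struct thread * t, enum reason r) {
		t->preempted = r;
		/* ... context switch into the kernel happens here ... */
		kernel_after_switch(t);
	}

	int main(void) {
		struct thread self = { NO_PREEMPTION };
		force_yield(&self, MANUAL_PREEMPTION); /* prints: thread requeued */
		return 0;
	}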