Changes in / [c744563a:a505021]

Files: 4 added, 16 edited

Legend:
  (unprefixed)  unmodified context
  -             removed
  +             added
libcfa/src/bits/containers.hfa
…
	static inline forall( dtype T | is_node(T) ) {
		void ?{}( __queue(T) & this ) with( this ) {
-			head{ 0p };
+			head{ 1p };
			tail{ &head };
+			verify(*tail == 1p);
		}

		void append( __queue(T) & this, T * val ) with( this ) {
			verify(tail != 0p);
+			verify(*tail == 1p);
			*tail = val;
			tail = &get_next( *val );
+			*tail = 1p;
		}

		T * pop_head( __queue(T) & this ) {
+			verify(*this.tail == 1p);
			T * head = this.head;
-			if( head ) {
+			if( head != 1p ) {
				this.head = get_next( *head );
-				if( !get_next( *head )) {
+				if( get_next( *head ) == 1p ) {
					this.tail = &this.head;
				}
				get_next( *head ) = 0p;
-			}
-			return head;
+				verify(*this.tail == 1p);
+				return head;
+			}
+			verify(*this.tail == 1p);
+			return 0p;
		}
…
			get_next( *val ) = 0p;

-			verify( (head == 0p) == (&head == tail) );
-			verify( *tail == 0p );
+			verify( (head == 1p) == (&head == tail) );
+			verify( *tail == 1p );
			return val;
		}
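The containers.hfa change above replaces the null (0p) end-of-queue marker with the address 1p, so a null next pointer can be reserved to mean "this node is not on any queue" while 1p means "last node of a queue". A minimal plain-C sketch of that idea (names and layout are invented for illustration; this is not the CFA __queue code):

#include <assert.h>
#include <stddef.h>

#define END ((struct node *)1)              /* plays the role of 1p */

struct node  { struct node * next; };
struct queue { struct node * head; struct node ** tail; };

static void queue_init( struct queue * q ) {
	q->head = END;
	q->tail = &q->head;
}

static void queue_append( struct queue * q, struct node * n ) {
	assert( n->next == NULL );              /* not already on a queue */
	*q->tail = n;
	q->tail  = &n->next;
	*q->tail = END;                         /* n is now the last element */
}

static struct node * queue_pop( struct queue * q ) {
	struct node * n = q->head;
	if ( n == END ) return NULL;            /* empty */
	q->head = n->next;
	if ( n->next == END ) q->tail = &q->head;
	n->next = NULL;                         /* off the queue again */
	return n;
}

int main( void ) {
	struct queue q; struct node a = { NULL }, b = { NULL };
	queue_init( &q );
	queue_append( &q, &a );
	queue_append( &q, &b );
	assert( queue_pop( &q ) == &a );
	assert( queue_pop( &q ) == &b );
	assert( queue_pop( &q ) == NULL );
	return 0;
}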
libcfa/src/bits/locks.hfa
…
	}

-	extern void yield( unsigned int );
-
	static inline void ?{}( __spinlock_t & this ) {
		this.lock = 0;
…
	// Lock the spinlock, return false if already acquired
	static inline bool try_lock  ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
+		disable_interrupts();
		bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
		if( result ) {
-			disable_interrupts();
			__cfaabi_dbg_record( this, caller );
+		} else {
+			enable_interrupts_noPoll();
		}
		return result;
…
	#endif

+		disable_interrupts();
		for ( unsigned int i = 1;; i += 1 ) {
			if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
…
	#endif
		}
-		disable_interrupts();
		__cfaabi_dbg_record( this, caller );
	}

	static inline void unlock( __spinlock_t & this ) {
+		__atomic_clear( &this.lock, __ATOMIC_RELEASE );
		enable_interrupts_noPoll();
-		__atomic_clear( &this.lock, __ATOMIC_RELEASE );
	}
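The locks.hfa change above moves disable_interrupts() in front of the test-and-set, re-enables interrupts on the failure path, and only re-enables them after the lock bit is cleared, so a thread can no longer be preempted while it holds a spinlock. A plain-C sketch of that ordering (the interrupt-masking calls are no-op stand-ins here, not the CFA runtime's):

#include <stdbool.h>

static void disable_interrupts( void ) { /* stand-in: mask preemption/signals here   */ }
static void enable_interrupts ( void ) { /* stand-in: unmask preemption/signals here */ }

struct spinlock { volatile bool lock; };

static bool spin_try_lock( struct spinlock * this ) {
	disable_interrupts();                                   /* mask before acquiring            */
	bool result = !this->lock
	           && !__atomic_test_and_set( &this->lock, __ATOMIC_ACQUIRE );
	if ( !result ) enable_interrupts();                     /* failed: unmask again             */
	return result;                                          /* success: stay masked until unlock */
}

static void spin_lock( struct spinlock * this ) {
	disable_interrupts();
	while ( this->lock || __atomic_test_and_set( &this->lock, __ATOMIC_ACQUIRE ) )
		;                                                   /* spin                             */
}

static void spin_unlock( struct spinlock * this ) {
	__atomic_clear( &this->lock, __ATOMIC_RELEASE );        /* release the lock first           */
	enable_interrupts();                                    /* then allow preemption again      */
}

int main( void ) {
	struct spinlock l = { 0 };
	spin_lock( &l );
	spin_unlock( &l );
	if ( spin_try_lock( &l ) ) spin_unlock( &l );
	return 0;
}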
libcfa/src/concurrency/coroutine.hfa
…
 void prime(T & cor);

-static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+static inline struct coroutine_desc * active_coroutine() __attribute__((const)) { return TL_GET( this_thread )->curr_cor; }

 //-----------------------------------------------------------------------------
…
 // Private wrappers for context switch and stack creation
 // Wrapper for co
-static inline void CoroutineCtxSwitch( coroutine_desc* src, coroutine_desc* dst) {
+static inline void CoroutineCtxSwitch( coroutine_desc * src, coroutine_desc * dst ) __attribute__((nonnull (1, 2))) {
 	// set state of current coroutine to inactive
 	src->state = src->state == Halted ? Halted : Inactive;
…
 }

-static inline void resume( coroutine_desc * dst) {
+static inline void resume( coroutine_desc * dst ) __attribute__((nonnull (1))) {
 	// optimization : read TLS once and reuse it
 	// Safety note: this is preemption safe since if
libcfa/src/concurrency/invoke.h
…
 };

-enum coroutine_state { Halted, Start, Inactive, Active, Primed };
+enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun };
+enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION };

 struct coroutine_desc {
…

 	// current execution status for coroutine
-	enum coroutine_state state;
+	volatile int state;
+	enum __Preemption_Reason preempted;

 	//SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it
…
 #ifdef __cforall
 extern "Cforall" {
-	static inline thread_desc *& get_next( thread_desc & this ) {
+	static inline thread_desc *& get_next( thread_desc & this ) __attribute__((const)) {
 		return this.next;
 	}

-	static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
+	static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) /*__attribute__((const))*/ {
 		return this.node.[next, prev];
 	}
…
 	}

-	static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
+	static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
 		if( (lhs.data != 0) != (rhs.data != 0) ) return false;
 		if( lhs.size != rhs.size ) return false;
libcfa/src/concurrency/kernel.cfa
rc744563a ra505021 110 110 //----------------------------------------------------------------------------- 111 111 //Start and stop routine for the kernel, declared first to make sure they run first 112 static void kernel_startup(void)__attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));113 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));112 static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 113 static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 114 114 115 115 //----------------------------------------------------------------------------- … … 208 208 } 209 209 210 static void start(processor * this); 210 static void * CtxInvokeProcessor(void * arg); 211 211 212 void ?{}(processor & this, const char name[], cluster & cltr) with( this ) { 212 213 this.name = name; 213 214 this.cltr = &cltr; 214 215 terminated{ 0 }; 216 destroyer = 0p; 215 217 do_terminate = false; 216 218 preemption_alarm = 0p; … … 220 222 idleLock{}; 221 223 222 start( &this ); 224 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this); 225 226 this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this ); 227 228 __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this); 223 229 } 224 230 … … 258 264 // Kernel Scheduling logic 259 265 //============================================================================================= 260 static void runThread(processor * this, thread_desc * dst);261 static void finishRunning(processor * this);262 static void halt(processor * this);266 static thread_desc * __next_thread(cluster * this); 267 static void __run_thread(processor * this, thread_desc * dst); 268 static void __halt(processor * this); 263 269 264 270 //Main of the processor contexts … … 283 289 thread_desc * readyThread = 0p; 284 290 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) { 285 readyThread = nextThread( this->cltr );291 readyThread = __next_thread( this->cltr ); 286 292 287 293 if(readyThread) { 288 verify( ! kernelTLS.preemption_state.enabled ); 289 290 runThread(this, readyThread); 291 292 verify( ! kernelTLS.preemption_state.enabled ); 293 294 //Some actions need to be taken from the kernel 295 finishRunning(this); 294 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 295 /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted); 296 /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next ); 297 298 __run_thread(this, readyThread); 299 300 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 296 301 297 302 spin_count = 0; 298 303 } else { 299 304 // spin(this, &spin_count); 300 halt(this);305 __halt(this); 301 306 } 302 307 } … … 318 323 // runThread runs a thread by context switching 319 324 // from the processor coroutine to the target thread 320 static void runThread(processor * this, thread_desc * thrd_dst) {325 static void __run_thread(processor * this, thread_desc * thrd_dst) { 321 326 coroutine_desc * proc_cor = get_coroutine(this->runner); 322 323 // Reset the terminating actions here324 this->finish.action_code = No_Action;325 327 326 328 // Update global state 327 329 kernelTLS.this_thread = thrd_dst; 328 330 329 // set state of processor coroutine to inactive and the thread to active 330 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 331 thrd_dst->state = Active; 332 333 // set context switch to the thread that the processor is executing 334 verify( thrd_dst->context.SP ); 335 CtxSwitch( &proc_cor->context, &thrd_dst->context ); 336 // when CtxSwitch returns we are back in the processor coroutine 337 338 // set state of processor coroutine to active and the thread to inactive 339 thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive; 331 // set state of processor coroutine to inactive 332 verify(proc_cor->state == Active); 333 proc_cor->state = Inactive; 334 335 // Actually run the thread 336 RUNNING: while(true) { 337 if(unlikely(thrd_dst->preempted)) { 338 thrd_dst->preempted = __NO_PREEMPTION; 339 verify(thrd_dst->state == Active || thrd_dst->state == Rerun); 340 } else { 341 verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive); 342 thrd_dst->state = Active; 343 } 344 345 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 346 347 // set context switch to the thread that the processor is executing 348 verify( thrd_dst->context.SP ); 349 CtxSwitch( &proc_cor->context, &thrd_dst->context ); 350 // when CtxSwitch returns we are back in the processor coroutine 351 352 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 353 354 355 // We just finished running a thread, there are a few things that could have happened. 356 // 1 - Regular case : the thread has blocked and now one has scheduled it yet. 357 // 2 - Racy case : the thread has blocked but someone has already tried to schedule it. 358 // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way 359 // 4 - Preempted 360 // In case 1, we may have won a race so we can't write to the state again. 361 // In case 2, we lost the race so we now own the thread. 362 // In case 3, we lost the race but can just reschedule the thread. 
363 364 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 365 // The thread was preempted, reschedule it and reset the flag 366 __schedule_thread( thrd_dst ); 367 break RUNNING; 368 } 369 370 // set state of processor coroutine to active and the thread to inactive 371 static_assert(sizeof(thrd_dst->state) == sizeof(int)); 372 enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST); 373 switch(old_state) { 374 case Halted: 375 // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on 376 thrd_dst->state = Halted; 377 378 // We may need to wake someone up here since 379 unpark( this->destroyer ); 380 this->destroyer = 0p; 381 break RUNNING; 382 case Active: 383 // This is case 1, the regular case, nothing more is needed 384 break RUNNING; 385 case Rerun: 386 // This is case 2, the racy case, someone tried to run this thread before it finished blocking 387 // In this case, just run it again. 388 continue RUNNING; 389 default: 390 // This makes no sense, something is wrong abort 391 abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state); 392 } 393 } 394 395 // Just before returning to the processor, set the processor coroutine to active 340 396 proc_cor->state = Active; 341 397 } 342 398 343 399 // KERNEL_ONLY 344 static void returnToKernel() { 400 void returnToKernel() { 401 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 345 402 coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 346 403 thread_desc * thrd_src = kernelTLS.this_thread; 347 404 348 // set state of current coroutine to inactive 349 thrd_src->state = thrd_src->state == Halted ? Halted : Inactive; 350 proc_cor->state = Active; 351 int local_errno = *__volatile_errno(); 352 #if defined( __i386 ) || defined( __x86_64 ) 353 __x87_store; 354 #endif 355 356 // set new coroutine that the processor is executing 357 // and context switch to it 358 verify( proc_cor->context.SP ); 359 CtxSwitch( &thrd_src->context, &proc_cor->context ); 360 361 // set state of new coroutine to active 362 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 363 thrd_src->state = Active; 364 365 #if defined( __i386 ) || defined( __x86_64 ) 366 __x87_load; 367 #endif 368 *__volatile_errno() = local_errno; 369 } 370 371 // KERNEL_ONLY 372 // Once a thread has finished running, some of 373 // its final actions must be executed from the kernel 374 static void finishRunning(processor * this) with( this->finish ) { 375 verify( ! 
kernelTLS.preemption_state.enabled ); 376 choose( action_code ) { 377 case No_Action: 378 break; 379 case Release: 380 unlock( *lock ); 381 case Schedule: 382 ScheduleThread( thrd ); 383 case Release_Schedule: 384 unlock( *lock ); 385 ScheduleThread( thrd ); 386 case Release_Multi: 387 for(int i = 0; i < lock_count; i++) { 388 unlock( *locks[i] ); 389 } 390 case Release_Multi_Schedule: 391 for(int i = 0; i < lock_count; i++) { 392 unlock( *locks[i] ); 393 } 394 for(int i = 0; i < thrd_count; i++) { 395 ScheduleThread( thrds[i] ); 396 } 397 case Callback: 398 callback(); 399 default: 400 abort("KERNEL ERROR: Unexpected action to run after thread"); 401 } 405 // Run the thread on this processor 406 { 407 int local_errno = *__volatile_errno(); 408 #if defined( __i386 ) || defined( __x86_64 ) 409 __x87_store; 410 #endif 411 verify( proc_cor->context.SP ); 412 CtxSwitch( &thrd_src->context, &proc_cor->context ); 413 #if defined( __i386 ) || defined( __x86_64 ) 414 __x87_load; 415 #endif 416 *__volatile_errno() = local_errno; 417 } 418 419 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 402 420 } 403 421 … … 447 465 } // Abort 448 466 449 void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {467 void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 450 468 pthread_attr_t attr; 451 469 … … 475 493 } 476 494 477 static void start(processor * this) {478 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);479 480 this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );481 482 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);483 }484 485 495 // KERNEL_ONLY 486 voidkernel_first_resume( processor * this ) {496 static void __kernel_first_resume( processor * this ) { 487 497 thread_desc * src = mainThread; 488 498 coroutine_desc * dst = get_coroutine(this->runner); … … 516 526 517 527 // KERNEL_ONLY 518 voidkernel_last_resume( processor * this ) {528 static void __kernel_last_resume( processor * this ) { 519 529 coroutine_desc * src = &mainThread->self_cor; 520 530 coroutine_desc * dst = get_coroutine(this->runner); … … 530 540 //----------------------------------------------------------------------------- 531 541 // Scheduler routines 532 533 542 // KERNEL ONLY 534 void ScheduleThread( thread_desc * thrd ) { 535 verify( thrd ); 536 verify( thrd->state != Halted ); 537 538 verify( ! kernelTLS.preemption_state.enabled ); 539 540 verifyf( thrd->next == 0p, "Expected null got %p", thrd->next ); 541 542 with( *thrd->curr_cluster ) { 543 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 544 bool was_empty = !(ready_queue != 0); 545 append( ready_queue, thrd ); 546 unlock( ready_queue_lock ); 547 548 if(was_empty) { 549 lock (proc_list_lock __cfaabi_dbg_ctx2); 550 if(idles) { 551 wake_fast(idles.head); 552 } 553 unlock (proc_list_lock); 543 void __schedule_thread( thread_desc * thrd ) with( *thrd->curr_cluster ) { 544 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 545 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) 546 /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, 547 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted ); 548 /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun, 549 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted ); 550 /* paranoid */ #endif 551 /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next ); 552 553 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 554 bool was_empty = !(ready_queue != 0); 555 append( ready_queue, thrd ); 556 unlock( ready_queue_lock ); 557 558 if(was_empty) { 559 lock (proc_list_lock __cfaabi_dbg_ctx2); 560 if(idles) { 561 wake_fast(idles.head); 554 562 } 555 else if( struct processor * idle = idles.head ) {556 wake_fast(idle);557 }558 559 } 560 561 verify( ! kernelTLS.preemption_state.enabled );563 unlock (proc_list_lock); 564 } 565 else if( struct processor * idle = idles.head ) { 566 wake_fast(idle); 567 } 568 569 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 562 570 } 563 571 564 572 // KERNEL ONLY 565 thread_desc * nextThread(cluster * this) with( *this ) { 566 verify( ! kernelTLS.preemption_state.enabled ); 573 static thread_desc * __next_thread(cluster * this) with( *this ) { 574 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 575 567 576 lock( ready_queue_lock __cfaabi_dbg_ctx2 ); 568 577 thread_desc * head = pop_head( ready_queue ); 569 578 unlock( ready_queue_lock ); 570 verify( ! kernelTLS.preemption_state.enabled ); 579 580 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 571 581 return head; 572 582 } 573 583 574 void BlockInternal() { 584 void unpark( thread_desc * thrd ) { 585 if( !thrd ) return; 586 575 587 disable_interrupts(); 576 verify( ! kernelTLS.preemption_state.enabled ); 588 static_assert(sizeof(thrd->state) == sizeof(int)); 589 enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST); 590 switch(old_state) { 591 case Active: 592 // Wake won the race, the thread will reschedule/rerun itself 593 break; 594 case Inactive: 595 /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION ); 596 597 // Wake lost the race, 598 thrd->state = Inactive; 599 __schedule_thread( thrd ); 600 break; 601 case Rerun: 602 abort("More than one thread attempted to schedule thread %p\n", thrd); 603 break; 604 case Halted: 605 case Start: 606 case Primed: 607 default: 608 // This makes no sense, something is wrong abort 609 abort(); 610 } 611 enable_interrupts( __cfaabi_dbg_ctx ); 612 } 613 614 void park( void ) { 615 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 616 disable_interrupts(); 617 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 618 /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION ); 619 577 620 returnToKernel(); 578 verify( ! kernelTLS.preemption_state.enabled ); 621 622 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 579 623 enable_interrupts( __cfaabi_dbg_ctx ); 580 } 581 582 void BlockInternal( __spinlock_t * lock ) { 624 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 625 626 } 627 628 // KERNEL ONLY 629 void __leave_thread() { 630 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 631 returnToKernel(); 632 abort(); 633 } 634 635 // KERNEL ONLY 636 bool force_yield( __Preemption_Reason reason ) { 637 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 583 638 disable_interrupts(); 584 with( *kernelTLS.this_processor ) { 585 finish.action_code = Release; 586 finish.lock = lock; 587 } 588 589 verify( ! kernelTLS.preemption_state.enabled ); 590 returnToKernel(); 591 verify( ! kernelTLS.preemption_state.enabled ); 592 593 enable_interrupts( __cfaabi_dbg_ctx ); 594 } 595 596 void BlockInternal( thread_desc * thrd ) { 597 disable_interrupts(); 598 with( * kernelTLS.this_processor ) { 599 finish.action_code = Schedule; 600 finish.thrd = thrd; 601 } 602 603 verify( ! kernelTLS.preemption_state.enabled ); 604 returnToKernel(); 605 verify( ! kernelTLS.preemption_state.enabled ); 606 607 enable_interrupts( __cfaabi_dbg_ctx ); 608 } 609 610 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) { 611 assert(thrd); 612 disable_interrupts(); 613 with( * kernelTLS.this_processor ) { 614 finish.action_code = Release_Schedule; 615 finish.lock = lock; 616 finish.thrd = thrd; 617 } 618 619 verify( ! kernelTLS.preemption_state.enabled ); 620 returnToKernel(); 621 verify( ! kernelTLS.preemption_state.enabled ); 622 623 enable_interrupts( __cfaabi_dbg_ctx ); 624 } 625 626 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 627 disable_interrupts(); 628 with( * kernelTLS.this_processor ) { 629 finish.action_code = Release_Multi; 630 finish.locks = locks; 631 finish.lock_count = count; 632 } 633 634 verify( ! kernelTLS.preemption_state.enabled ); 635 returnToKernel(); 636 verify( ! kernelTLS.preemption_state.enabled ); 637 638 enable_interrupts( __cfaabi_dbg_ctx ); 639 } 640 641 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 642 disable_interrupts(); 643 with( *kernelTLS.this_processor ) { 644 finish.action_code = Release_Multi_Schedule; 645 finish.locks = locks; 646 finish.lock_count = lock_count; 647 finish.thrds = thrds; 648 finish.thrd_count = thrd_count; 649 } 650 651 verify( ! kernelTLS.preemption_state.enabled ); 652 returnToKernel(); 653 verify( ! kernelTLS.preemption_state.enabled ); 654 655 enable_interrupts( __cfaabi_dbg_ctx ); 656 } 657 658 void BlockInternal(__finish_callback_fptr_t callback) { 659 disable_interrupts(); 660 with( *kernelTLS.this_processor ) { 661 finish.action_code = Callback; 662 finish.callback = callback; 663 } 664 665 verify( ! kernelTLS.preemption_state.enabled ); 666 returnToKernel(); 667 verify( ! kernelTLS.preemption_state.enabled ); 668 669 enable_interrupts( __cfaabi_dbg_ctx ); 670 } 671 672 // KERNEL ONLY 673 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 674 verify( ! kernelTLS.preemption_state.enabled ); 675 with( * kernelTLS.this_processor ) { 676 finish.action_code = thrd ? Release_Schedule : Release; 677 finish.lock = lock; 678 finish.thrd = thrd; 679 } 680 681 returnToKernel(); 639 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 640 641 thread_desc * thrd = kernelTLS.this_thread; 642 /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun); 643 644 // SKULLDUGGERY: It is possible that we are preempting this thread just before 645 // it was going to park itself. If that is the case and it is already using the 646 // intrusive fields then we can't use them to preempt the thread 647 // If that is the case, abandon the preemption. 
648 bool preempted = false; 649 if(thrd->next == 0p) { 650 preempted = true; 651 thrd->preempted = reason; 652 returnToKernel(); 653 } 654 655 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 656 enable_interrupts_noPoll(); 657 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 658 659 return preempted; 682 660 } 683 661 … … 687 665 //----------------------------------------------------------------------------- 688 666 // Kernel boot procedures 689 static void kernel_startup(void) {667 static void __kernel_startup(void) { 690 668 verify( ! kernelTLS.preemption_state.enabled ); 691 669 __cfaabi_dbg_print_safe("Kernel : Starting\n"); … … 748 726 // Add the main thread to the ready queue 749 727 // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread 750 ScheduleThread(mainThread);728 __schedule_thread(mainThread); 751 729 752 730 // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX 753 731 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 754 732 // mainThread is on the ready queue when this call is made. 755 kernel_first_resume( kernelTLS.this_processor );733 __kernel_first_resume( kernelTLS.this_processor ); 756 734 757 735 … … 765 743 } 766 744 767 static void kernel_shutdown(void) {745 static void __kernel_shutdown(void) { 768 746 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n"); 769 747 … … 776 754 // which is currently here 777 755 __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE); 778 kernel_last_resume( kernelTLS.this_processor );756 __kernel_last_resume( kernelTLS.this_processor ); 779 757 mainThread->self_cor.state = Halted; 780 758 … … 802 780 // Kernel Quiescing 803 781 //============================================================================================= 804 static void halt(processor * this) with( *this ) {782 static void __halt(processor * this) with( *this ) { 805 783 // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ); 806 784 … … 913 891 914 892 // atomically release spin lock and block 915 BlockInternal( &lock ); 893 unlock( lock ); 894 park(); 916 895 } 917 896 else { … … 932 911 933 912 // make new owner 934 WakeThread( thrd );913 unpark( thrd ); 935 914 } 936 915 … … 990 969 //----------------------------------------------------------------------------- 991 970 // Debug 992 bool threading_enabled(void) {971 bool threading_enabled(void) __attribute__((const)) { 993 972 return true; 994 973 } -
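The kernel.cfa rewrite above is the core of this changeset: the finish-action machinery (BlockInternal/finishRunning) is replaced by park()/unpark(), and the race between a thread that is blocking and another thread that is waking it is resolved with an atomic exchange on the thread state (Inactive vs. Rerun). A plain-C sketch of just that handshake, with a POSIX semaphore standing in for the kernel's context switch (names and the one-shot three-state encoding are illustrative, not the CFA runtime's):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

/* Both sides do an atomic exchange on the state, so whichever runs second
 * sees what the first one did:
 *   - park() sees RERUN    -> someone already unparked us, do not block;
 *   - unpark() sees INACTIVE -> the target is blocked, wake it up.        */
enum state { ACTIVE, INACTIVE, RERUN };

struct thrd {
	int   state;    /* one of enum state, accessed atomically              */
	sem_t blocked;  /* stand-in for "switched back to the kernel"          */
};

static void park( struct thrd * this ) {
	int old = __atomic_exchange_n( &this->state, INACTIVE, __ATOMIC_SEQ_CST );
	if ( old == RERUN ) {                    /* unpark() already happened  */
		this->state = ACTIVE;                /* consume it, keep running   */
		return;
	}
	sem_wait( &this->blocked );              /* really block               */
	this->state = ACTIVE;
}

static void unpark( struct thrd * this ) {
	int old = __atomic_exchange_n( &this->state, RERUN, __ATOMIC_SEQ_CST );
	if ( old == INACTIVE ) sem_post( &this->blocked );  /* it was blocked: wake it */
	/* old == ACTIVE: it has not parked yet; it will see RERUN and not block */
}

static struct thrd T;

static void * worker( void * arg ) {
	(void)arg;
	printf( "worker: parking\n" );
	park( &T );
	printf( "worker: unparked\n" );
	return NULL;
}

int main( void ) {
	pthread_t tid;
	T.state = ACTIVE;
	sem_init( &T.blocked, 0, 0 );
	pthread_create( &tid, NULL, worker, NULL );
	unpark( &T );              /* may run before or after park(); either order works */
	pthread_join( tid, NULL );
	sem_destroy( &T.blocked );
	return 0;
}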
libcfa/src/concurrency/kernel.hfa
…
 extern struct cluster * mainCluster;

-enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
-
-typedef void (*__finish_callback_fptr_t)(void);
-
-//TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
-struct FinishAction {
-	FinishOpCode action_code;
-	/*
-	// Union of possible actions
-	union {
-		// Option 1 : locks and threads
-		struct {
-			// 1 thread or N thread
-			union {
-				thread_desc * thrd;
-				struct {
-					thread_desc ** thrds;
-					unsigned short thrd_count;
-				};
-			};
-			// 1 lock or N lock
-			union {
-				__spinlock_t * lock;
-				struct {
-					__spinlock_t ** locks;
-					unsigned short lock_count;
-				};
-			};
-		};
-		// Option 2 : action pointer
-		__finish_callback_fptr_t callback;
-	};
-	/*/
-	thread_desc * thrd;
-	thread_desc ** thrds;
-	unsigned short thrd_count;
-	__spinlock_t * lock;
-	__spinlock_t ** locks;
-	unsigned short lock_count;
-	__finish_callback_fptr_t callback;
-	//*/
-};
-static inline void ?{}(FinishAction & this) {
-	this.action_code = No_Action;
-	this.thrd = 0p;
-	this.lock = 0p;
-}
-static inline void ^?{}(FinishAction &) {}
-
 // Processor
 coroutine processorCtx_t {
…
 	// RunThread data
 	// Action to do after a thread is ran
-	struct FinishAction finish;
+	thread_desc * destroyer;

 	// Preemption data
…
 static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

-static inline [processor *&, processor *& ] __get( processor & this ) {
-	return this.node.[next, prev];
-}
+static inline [processor *&, processor *& ] __get( processor & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }

 //-----------------------------------------------------------------------------
…
 static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption()}; }

-static inline [cluster *&, cluster *& ] __get( cluster & this ) {
-	return this.node.[next, prev];
-}
+static inline [cluster *&, cluster *& ] __get( cluster & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }

-static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
-static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }
+static inline struct processor * active_processor() __attribute__((const)) { return TL_GET( this_processor ); } // UNSAFE
+static inline struct cluster   * active_cluster  () __attribute__((const)) { return TL_GET( this_processor )->cltr; }

 // Local Variables: //
libcfa/src/concurrency/kernel_private.hfa
…
 }

-void ScheduleThread( thread_desc * );
-static inline void WakeThread( thread_desc * thrd ) {
-	if( !thrd ) return;
-
-	verify(thrd->state == Inactive);
-
-	disable_interrupts();
-	ScheduleThread( thrd );
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-thread_desc * nextThread(cluster * this);
+void __schedule_thread( thread_desc * ) __attribute__((nonnull (1)));

 //Block current thread and release/wake-up the following resources
-void BlockInternal(void);
-void BlockInternal(__spinlock_t * lock);
-void BlockInternal(thread_desc * thrd);
-void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
-void BlockInternal(__spinlock_t * locks [], unsigned short count);
-void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
-void BlockInternal(__finish_callback_fptr_t callback);
-void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
+void __leave_thread() __attribute__((noreturn));

 //-----------------------------------------------------------------------------
…
 void main(processorCtx_t *);

-void * create_pthread( pthread_t *, void * (*)(void *), void * );
+void * __create_pthread( pthread_t *, void * (*)(void *), void * );

 static inline void wake_fast(processor * this) {
…
 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]

-static inline uint32_t tls_rand() {
+static inline uint32_t __tls_rand() {
 	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
 	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
libcfa/src/concurrency/monitor.cfa
rc744563a ra505021 117 117 118 118 // Some one else has the monitor, wait in line for it 119 /* paranoid */ verify( thrd->next == 0p ); 119 120 append( this->entry_queue, thrd ); 120 121 BlockInternal( &this->lock ); 121 /* paranoid */ verify( thrd->next == 1p ); 122 123 unlock( this->lock ); 124 park(); 122 125 123 126 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 124 127 125 / / BlockInternal will unlock spinlock, no need to unlock ourselves128 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 126 129 return; 127 130 } 128 131 129 132 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 133 134 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 135 /* paranoid */ verify( this->lock.lock ); 130 136 131 137 // Release the lock and leave … … 149 155 set_owner( this, thrd ); 150 156 157 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 158 151 159 unlock( this->lock ); 152 160 return; … … 166 174 // Wake the thread that is waiting for this 167 175 __condition_criterion_t * urgent = pop( this->signal_stack ); 168 verify( urgent );176 /* paranoid */ verify( urgent ); 169 177 170 178 // Reset mask … … 175 183 176 184 // Some one else has the monitor, wait for him to finish and then run 177 BlockInternal( &this->lock, urgent->owner->waiting_thread ); 185 unlock( this->lock ); 186 187 // Release the next thread 188 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 189 unpark( urgent->owner->waiting_thread ); 190 191 // Park current thread waiting 192 park(); 178 193 179 194 // Some one was waiting for us, enter 180 set_owner( this, thrd);195 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 181 196 } 182 197 else { … … 187 202 188 203 // Some one else has the monitor, wait in line for it 204 /* paranoid */ verify( thrd->next == 0p ); 189 205 append( this->entry_queue, thrd ); 190 BlockInternal( &this->lock ); 191 192 // BlockInternal will unlock spinlock, no need to unlock ourselves 206 /* paranoid */ verify( thrd->next == 1p ); 207 unlock( this->lock ); 208 209 // Park current thread waiting 210 park(); 211 212 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 193 213 return; 194 214 } … … 205 225 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 206 226 207 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this );227 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 208 228 209 229 // Leaving a recursion level, decrement the counter … … 221 241 thread_desc * new_owner = next_thread( this ); 222 242 243 // Check the new owner is consistent with who we wake-up 
244 // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor 245 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 246 223 247 // We can now let other threads in safely 224 248 unlock( this->lock ); 225 249 226 250 //We need to wake-up the thread 227 WakeThread( new_owner ); 251 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 252 unpark( new_owner ); 228 253 } 229 254 … … 252 277 disable_interrupts(); 253 278 254 thrd->s elf_cor.state = Halted;255 256 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );279 thrd->state = Halted; 280 281 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this ); 257 282 258 283 // Leaving a recursion level, decrement the counter … … 266 291 thread_desc * new_owner = next_thread( this ); 267 292 268 // Leave the thread, this will unlock the spinlock 269 // Use leave thread instead of BlockInternal which is 270 // specialized for this case and supports null new_owner 271 LeaveThread( &this->lock, new_owner ); 293 // Release the monitor lock 294 unlock( this->lock ); 295 296 // Unpark the next owner if needed 297 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 298 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 299 /* paranoid */ verify( ! kernelTLS.this_processor->destroyer ); 300 /* paranoid */ verify( thrd->state == Halted ); 301 302 kernelTLS.this_processor->destroyer = new_owner; 303 304 // Leave the thread 305 __leave_thread(); 272 306 273 307 // Control flow should never reach here! 
… … 400 434 // Append the current wait operation to the ones already queued on the condition 401 435 // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion 436 /* paranoid */ verify( waiter.next == 0p ); 402 437 append( this.blocked, &waiter ); 438 /* paranoid */ verify( waiter.next == 1p ); 403 439 404 440 // Lock all monitors (aggregates the locks as well) … … 419 455 } 420 456 457 // Unlock the locks, we don't need them anymore 458 for(int i = 0; i < count; i++) { 459 unlock( *locks[i] ); 460 } 461 462 // Wake the threads 463 for(int i = 0; i < thread_count; i++) { 464 unpark( threads[i] ); 465 } 466 421 467 // Everything is ready to go to sleep 422 BlockInternal( locks, count, threads, thread_count);468 park(); 423 469 424 470 // We are back, restore the owners and recursions … … 490 536 //Find the thread to run 491 537 thread_desc * signallee = pop_head( this.blocked )->waiting_thread; 538 /* paranoid */ verify( signallee->next == 0p ); 492 539 set_owner( monitors, count, signallee ); 493 540 494 541 __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee ); 495 542 543 // unlock all the monitors 544 unlock_all( locks, count ); 545 546 // unpark the thread we signalled 547 unpark( signallee ); 548 496 549 //Everything is ready to go to sleep 497 BlockInternal( locks, count, &signallee, 1);550 park(); 498 551 499 552 … … 592 645 set_owner( monitors, count, next ); 593 646 594 // Everything is ready to go to sleep 595 BlockInternal( locks, count, &next, 1 ); 647 // unlock all the monitors 648 unlock_all( locks, count ); 649 650 // unpark the thread we signalled 651 unpark( next ); 652 653 //Everything is ready to go to sleep 654 park(); 596 655 597 656 // We are back, restore the owners and recursions … … 631 690 } 632 691 692 // unlock all the monitors 693 unlock_all( locks, count ); 694 633 695 //Everything is ready to go to sleep 634 BlockInternal( locks, count);696 park(); 635 697 636 698 … … 650 712 651 713 static inline void set_owner( monitor_desc * this, thread_desc * owner ) { 652 / / __cfaabi_dbg_print_safe( "Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner);714 /* paranoid */ verify( this->lock.lock ); 653 715 654 716 //Pass the monitor appropriately … … 660 722 661 723 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) { 662 monitors[0]->owner = owner; 663 monitors[0]->recursion = 1; 724 /* paranoid */ verify ( monitors[0]->lock.lock ); 725 /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] ); 726 monitors[0]->owner = owner; 727 monitors[0]->recursion = 1; 664 728 for( __lock_size_t i = 1; i < count; i++ ) { 665 monitors[i]->owner = owner; 666 monitors[i]->recursion = 0; 729 /* paranoid */ verify ( monitors[i]->lock.lock ); 730 /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] ); 731 monitors[i]->owner = owner; 732 monitors[i]->recursion = 0; 667 733 } 668 734 } … … 688 754 //regardless of if we are ready to baton pass, 689 755 //we need to set the monitor as in use 756 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, 
this->owner, this->recursion, this ); 690 757 set_owner( this, urgent->owner->waiting_thread ); 691 758 … … 696 763 // Get the next thread in the entry_queue 697 764 thread_desc * new_owner = pop_head( this->entry_queue ); 765 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 766 /* paranoid */ verify( !new_owner || new_owner->next == 0p ); 698 767 set_owner( this, new_owner ); 699 768 … … 841 910 // For each thread in the entry-queue 842 911 for( thread_desc ** thrd_it = &entry_queue.head; 843 *thrd_it ;912 *thrd_it != 1p; 844 913 thrd_it = &(*thrd_it)->next 845 914 ) { -
libcfa/src/concurrency/monitor.hfa
…
 bool signal      ( condition & this );
 bool signal_block( condition & this );
-static inline bool is_empty    ( condition & this ) { return !this.blocked.head; }
+static inline bool is_empty    ( condition & this ) { return this.blocked.head == 1p; }
 uintptr_t front  ( condition & this );
libcfa/src/concurrency/mutex.cfa
…
 	if( is_locked ) {
 		append( blocked_threads, kernelTLS.this_thread );
-		BlockInternal( &lock );
+		unlock( lock );
+		park();
 	}
 	else {
…
 	lock( this.lock __cfaabi_dbg_ctx2 );
 	this.is_locked = (this.blocked_threads != 0);
-	WakeThread(
+	unpark(
 		pop_head( this.blocked_threads )
 	);
…
 	else {
 		append( blocked_threads, kernelTLS.this_thread );
-		BlockInternal( &lock );
+		unlock( lock );
+		park();
 	}
 }
…
 		owner = thrd;
 		recursion_count = (thrd ? 1 : 0);
-		WakeThread( thrd );
+		unpark( thrd );
 	}
 	unlock( lock );
…
 void notify_one(condition_variable & this) with(this) {
 	lock( lock __cfaabi_dbg_ctx2 );
-	WakeThread(
+	unpark(
 		pop_head( this.blocked_threads )
 	);
…
 	lock( lock __cfaabi_dbg_ctx2 );
 	while(this.blocked_threads) {
-		WakeThread(
+		unpark(
 			pop_head( this.blocked_threads )
 		);
…
 	lock( this.lock __cfaabi_dbg_ctx2 );
 	append( this.blocked_threads, kernelTLS.this_thread );
-	BlockInternal( &this.lock );
+	unlock( this.lock );
+	park();
 }

…
 	lock( this.lock __cfaabi_dbg_ctx2 );
 	append( this.blocked_threads, kernelTLS.this_thread );
-	void __unlock(void) {
-		unlock(l);
-		unlock(this.lock);
-	}
-	BlockInternal( __unlock );
+	unlock(l);
+	unlock(this.lock);
+	park();
 	lock(l);
 }
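With park()/unpark(), the blocking paths in mutex.cfa above become "append self, release the internal lock, then park", and the waking side becomes "pop a waiter, release the lock, then unpark". A plain-C sketch of why releasing before blocking is safe once the waiter is queued (here a per-waiter semaphore remembers an early wake-up, the way unpark() marks a not-yet-parked thread Rerun); names and structure are illustrative only:

#include <pthread.h>
#include <semaphore.h>
#include <sched.h>
#include <stdio.h>

struct waiter {
	sem_t           sem;
	struct waiter * next;
};

struct condition {
	pthread_mutex_t lock;       /* stand-in for the internal __spinlock_t  */
	struct waiter * head;
};

static void cond_wait( struct condition * this, struct waiter * w ) {
	sem_init( &w->sem, 0, 0 );
	pthread_mutex_lock( &this->lock );
	w->next    = this->head;                 /* enqueue self (LIFO for brevity) */
	this->head = w;
	pthread_mutex_unlock( &this->lock );     /* release the lock ...            */
	sem_wait( &w->sem );                     /* ... and only then block         */
	sem_destroy( &w->sem );
}

static void cond_notify_one( struct condition * this ) {
	pthread_mutex_lock( &this->lock );
	struct waiter * w = this->head;          /* dequeue a waiter, if any        */
	if ( w ) this->head = w->next;
	pthread_mutex_unlock( &this->lock );     /* release before waking           */
	if ( w ) sem_post( &w->sem );            /* wake it, or record the wake-up  */
}

static struct condition C = { PTHREAD_MUTEX_INITIALIZER, NULL };

static void * waiter_main( void * arg ) {
	struct waiter w;
	(void)arg;
	cond_wait( &C, &w );
	printf( "waiter: woken\n" );
	return NULL;
}

int main( void ) {
	pthread_t tid;
	pthread_create( &tid, NULL, waiter_main, NULL );
	for ( ;; ) {                             /* retry until the waiter is queued */
		pthread_mutex_lock( &C.lock );
		int queued = (C.head != NULL);
		pthread_mutex_unlock( &C.lock );
		if ( queued ) break;
		sched_yield();
	}
	cond_notify_one( &C );
	pthread_join( tid, NULL );
	return 0;
}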
libcfa/src/concurrency/preemption.cfa
…
 void enable_interrupts( __cfaabi_dbg_ctx_param ) {
 	processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
-	thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

 	with( kernelTLS.preemption_state ){
…
 		if( proc->pending_preemption ) {
 			proc->pending_preemption = false;
-			BlockInternal( thrd );
+			force_yield( __POLL_PREEMPTION );
 		}
 	}
…
 	signal_block( SIGALRM );

-	alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
+	alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p );
 }

…
 	// Preemption can occur here

-	BlockInternal( kernelTLS.this_thread );            // Do the actual CtxSwitch
+	force_yield( __ALARM_PREEMPTION );                 // Do the actual CtxSwitch
 }
libcfa/src/concurrency/thread.cfa
…
 #include "invoke.h"

-extern "C" {
-	#include <fenv.h>
-	#include <stddef.h>
-}
-
-//extern volatile thread_local processor * this_processor;
-
 //-----------------------------------------------------------------------------
 // Thread ctors and dtors
…
 	self_cor{ name, storage, storageSize };
 	state = Start;
+	preempted = __NO_PREEMPTION;
 	curr_cor = &self_cor;
 	self_mon.owner = &this;
…
 }

+//-----------------------------------------------------------------------------
+// Starting and stopping threads
+forall( dtype T | is_thread(T) )
+void __thrd_start( T & this, void (*main_p)(T &) ) {
+	thread_desc * this_thrd = get_thread(this);
+
+	disable_interrupts();
+	CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
+
+	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
+	verify( this_thrd->context.SP );
+
+	__schedule_thread(this_thrd);
+	enable_interrupts( __cfaabi_dbg_ctx );
+}
+
+//-----------------------------------------------------------------------------
+// Support for threads that don't ues the thread keyword
 forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
 void ?{}( scoped(T)& this ) with( this ) {
…
 }

-//-----------------------------------------------------------------------------
-// Starting and stopping threads
-forall( dtype T | is_thread(T) )
-void __thrd_start( T & this, void (*main_p)(T &) ) {
-	thread_desc * this_thrd = get_thread(this);
-
-	disable_interrupts();
-	CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
-
-	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
-	verify( this_thrd->context.SP );
-
-	ScheduleThread(this_thrd);
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
-void yield( void ) {
-	// Safety note : This could cause some false positives due to preemption
-	verify( TL_GET( preemption_state.enabled ) );
-	BlockInternal( TL_GET( this_thread ) );
-	// Safety note : This could cause some false positives due to preemption
-	verify( TL_GET( preemption_state.enabled ) );
-}
-
-void yield( unsigned times ) {
-	for( unsigned i = 0; i < times; i++ ) {
-		yield();
-	}
-}
-
 // Local Variables: //
 // mode: c //
libcfa/src/concurrency/thread.hfa
…
 };

-#define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
+// define that satisfies the trait without using the thread keyword
+#define DECL_THREAD(X) thread_desc* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)
+
+// Inline getters for threads/coroutines/monitors
+forall( dtype T | is_thread(T) )
+static inline coroutine_desc* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }

 forall( dtype T | is_thread(T) )
-static inline coroutine_desc* get_coroutine(T & this) {
-	return &get_thread(this)->self_cor;
-}
+static inline monitor_desc  * get_monitor  (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }

-forall( dtype T | is_thread(T) )
-static inline monitor_desc* get_monitor(T & this) {
-	return &get_thread(this)->self_mon;
-}
+static inline coroutine_desc* get_coroutine(thread_desc * this) __attribute__((const)) { return &this->self_cor; }
+static inline monitor_desc  * get_monitor  (thread_desc * this) __attribute__((const)) { return &this->self_mon; }

-static inline coroutine_desc* get_coroutine(thread_desc * this) {
-	return &this->self_cor;
-}
-
-static inline monitor_desc* get_monitor(thread_desc * this) {
-	return &this->self_mon;
-}
-
+//-----------------------------------------------------------------------------
+// forward declarations needed for threads
 extern struct cluster * mainCluster;

…
 void ^?{}( scoped(T)& this );

-void yield();
-void yield( unsigned times );
+//-----------------------------------------------------------------------------
+// Thread getters
+static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }

-static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
+//-----------------------------------------------------------------------------
+// Scheduler API
+
+//----------
+// Park thread: block until corresponding call to unpark, won't block if unpark is already called
+void park( void );
+
+//----------
+// Unpark a thread, if the thread is already blocked, schedule it
+// if the thread is not yet block, signal that it should rerun immediately
+void unpark( thread_desc * this );
+
+forall( dtype T | is_thread(T) )
+static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
+
+//----------
+// Yield: force thread to block and be rescheduled
+bool force_yield( enum __Preemption_Reason );
+
+static inline void yield() {
+	force_yield(__MANUAL_PREEMPTION);
+}
+
+// Yield: yield N times
+static inline void yield( unsigned times ) {
+	for( times ) {
+		yield();
+	}
+}

 // Local Variables: //
tests/concurrent/examples/.expect/datingService.txt
-Girl:17 is dating Boy at 2 with ccode 17
-Boy:2 is dating Girl 17 with ccode 17
-Boy:14 is dating Girl 5 with ccode 5
-Girl:5 is dating Boy at 14 with ccode 5
-Boy:9 is dating Girl 10 with ccode 10
-Girl:10 is dating Boy at 9 with ccode 10
-Boy:1 is dating Girl 18 with ccode 18
-Girl:18 is dating Boy at 1 with ccode 18
-Boy:16 is dating Girl 3 with ccode 3
-Girl:3 is dating Boy at 16 with ccode 3
-Boy:5 is dating Girl 14 with ccode 14
-Girl:14 is dating Boy at 5 with ccode 14
-Boy:15 is dating Girl 4 with ccode 4
-Girl:4 is dating Boy at 15 with ccode 4
-Girl:0 is dating Boy at 19 with ccode 0
-Boy:19 is dating Girl 0 with ccode 0
-Girl:9 is dating Boy at 10 with ccode 9
-Boy:10 is dating Girl 9 with ccode 9
-Girl:11 is dating Boy at 8 with ccode 11
-Boy:8 is dating Girl 11 with ccode 11
-Boy:12 is dating Girl 7 with ccode 7
-Girl:7 is dating Boy at 12 with ccode 7
-Boy:11 is dating Girl 8 with ccode 8
-Girl:8 is dating Boy at 11 with ccode 8
-Girl:16 is dating Boy at 3 with ccode 16
-Boy:3 is dating Girl 16 with ccode 16
-Girl:15 is dating Boy at 4 with ccode 15
-Boy:4 is dating Girl 15 with ccode 15
-Girl:19 is dating Boy at 0 with ccode 19
-Boy:0 is dating Girl 19 with ccode 19
-Girl:2 is dating Boy at 17 with ccode 2
-Boy:17 is dating Girl 2 with ccode 2
-Boy:13 is dating Girl 6 with ccode 6
-Girl:6 is dating Boy at 13 with ccode 6
-Boy:7 is dating Girl 12 with ccode 12
-Girl:12 is dating Boy at 7 with ccode 12
-Girl:13 is dating Boy at 6 with ccode 13
-Boy:6 is dating Girl 13 with ccode 13
-Girl:1 is dating Boy at 18 with ccode 1
-Boy:18 is dating Girl 1 with ccode 1
tests/concurrent/examples/datingService.cfa
 //
 // Cforall Version 1.0.0 Copyright (C) 2017 University of Waterloo
-// 
+//
 // The contents of this file are covered under the licence agreement in the
 // file "LICENCE" distributed with Cforall.
…
 		signal_block( Boys[ccode] );            // restart boy to set phone number
 	} // if
-	sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
+	//sout | "Girl:" | PhoneNo | "is dating Boy at" | BoyPhoneNo | "with ccode" | ccode;
 	return BoyPhoneNo;
 } // DatingService girl
…
 		signal_block( Girls[ccode] );           // restart girl to set phone number
 	} // if
-	sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
+	//sout | " Boy:" | PhoneNo | "is dating Girl" | GirlPhoneNo | "with ccode" | ccode;
 	return GirlPhoneNo;
 } // DatingService boy
tests/concurrent/multi-monitor.cfa
…

 void increment( monitor_t & mutex p1, monitor_t & mutex p2, int & value ) {
+	assert(active_thread() == get_monitor(p1)->owner);
+	assert(active_thread() == get_monitor(p2)->owner);
 	value += 1;
+	assert(active_thread() == get_monitor(p1)->owner);
+	assert(active_thread() == get_monitor(p2)->owner);
 }