- Timestamp: May 8, 2018, 11:55:33 AM
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
- Children: 5fec3f6
- Parents: 10cfad9
- Location: src/libcfa
- Files: 9 edited
src/libcfa/bits/containers.h
(r10cfad9 → r14a61b5) New doubly-linked-list section added after the #endif at line 187 (new lines 188–261), just before the "Tools" section:

	//-----------------------------------------------------------------------------
	// Doubly Linked List
	//-----------------------------------------------------------------------------
	#ifdef __cforall
	trait is_db_node(dtype T) {
		T*& get_next( T& );
		T*& get_prev( T& );
	};
	#endif

	#ifdef __cforall
	forall(dtype TYPE | is_db_node(TYPE))
	#define T TYPE
	#else
	#define T void
	#endif
	struct __dllist {
		T * head;
	};
	#undef T

	#ifdef __cforall
	#define __dllist_t(T) __dllist(T)
	#else
	#define __dllist_t(T) struct __dllist
	#endif

	#ifdef __cforall

	forall(dtype T | is_db_node(T))
	static inline void ?{}( __dllist(T) & this ) with( this ) {
		head{ NULL };
	}

	// forall(dtype T | is_db_node(T) | sized(T))
	// static inline void push_front( __dllist(T) & this, T & node ) with( this ) {
	// 	if ( head ) {
	// 		get_next( node ) = head;
	// 		get_prev( node ) = get_prev( *head );
	// 		// inserted node must be consistent before it is seen
	// 		// prevent code movement across barrier
	// 		asm( "" : : : "memory" );
	// 		get_prev( *head ) = node;
	// 		T & prev = *get_prev( node );
	// 		get_next( prev ) = node;
	// 	}
	// 	else {
	// 		get_next( node ) = &node;
	// 		get_prev( node ) = &node;
	// 	}

	// 	// prevent code movement across barrier
	// 	asm( "" : : : "memory" );
	// 	head = val;
	// }

	// forall(dtype T | is_db_node(T) | sized(T))
	// static inline T * remove( __dllist(T) & this, T & node ) with( this ) {
	// 	if ( &node == head ) {
	// 		if ( get_next( *head ) == head ) {
	// 			head = NULL;
	// 		}
	// 		else {
	// 			head = get_next( *head );
	// 		}
	// 	}
	// 	get_prev( *get_next( node ) ) = get_prev( node );
	// 	get_next( *get_prev( node ) ) = get_next( node );
	// 	get_next( node ) = NULL;
	// 	get_prev( node ) = NULL;
	// }
	#endif
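For readers unfamiliar with intrusive circular lists, the following plain-C sketch shows the same shape the (still commented-out) CFA push_front/remove aim for. All names here (node_t, dllist_t, the int payload) are illustrative stand-ins, not libcfa identifiers; the compiler barriers mirror the asm( "" : : : "memory" ) statements above.

	#include <stddef.h>

	/* intrusive node: the links live inside the element itself */
	typedef struct node {
		struct node * next;
		struct node * prev;
		int value;
	} node_t;

	typedef struct dllist {
		node_t * head;            /* NULL when empty; list is circular otherwise */
	} dllist_t;

	static inline void dllist_push_front( dllist_t * this, node_t * node ) {
		if ( this->head ) {
			node->next = this->head;
			node->prev = this->head->prev;
			/* inserted node must be consistent before it is seen;
			   prevent compiler code movement across the barrier */
			__asm__ __volatile__( "" ::: "memory" );
			this->head->prev->next = node;
			this->head->prev = node;
		} else {
			node->next = node;    /* single element points at itself */
			node->prev = node;
		}
		__asm__ __volatile__( "" ::: "memory" );
		this->head = node;
	}

	static inline void dllist_remove( dllist_t * this, node_t * node ) {
		if ( node == this->head ) {
			this->head = ( node->next == node ) ? NULL : node->next;
		}
		node->prev->next = node->next;
		node->next->prev = node->prev;
		node->next = NULL;
		node->prev = NULL;
	}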
src/libcfa/concurrency/coroutine
(r10cfad9 → r14a61b5)

	  // Suspend implementation inlined for performance
	  static inline void suspend() {
	- 	coroutine_desc * src = TL_GET( this_coroutine ); // optimization
	+ 	// optimization : read TLS once and reuse it
	+ 	// Safety note: this is preemption safe since if
	+ 	// preemption occurs after this line, the pointer
	+ 	// will also migrate which means this value will
	+ 	// stay in syn with the TLS
	+ 	coroutine_desc * src = TL_GET( this_coroutine );

	  	assertf( src->last != 0, …

	  forall(dtype T | is_coroutine(T))
	  static inline void resume(T & cor) {
	- 	coroutine_desc * src = TL_GET( this_coroutine ); // optimization
	+ 	// optimization : read TLS once and reuse it
	+ 	// Safety note: this is preemption safe since if
	+ 	// preemption occurs after this line, the pointer
	+ 	// will also migrate which means this value will
	+ 	// stay in syn with the TLS
	+ 	coroutine_desc * src = TL_GET( this_coroutine );
	  	coroutine_desc * dst = get_coroutine(cor);
	  	…
	  		dst->last = src;
	  		dst->starter = dst->starter ? dst->starter : src;
	- 	} // if
	+ 	}

	  	// always done for performance testing
	  	…

	  static inline void resume(coroutine_desc * dst) {
	- 	coroutine_desc * src = TL_GET( this_coroutine ); // optimization
	+ 	// optimization : read TLS once and reuse it
	+ 	// Safety note: this is preemption safe since if
	+ 	// preemption occurs after this line, the pointer
	+ 	// will also migrate which means this value will
	+ 	// stay in syn with the TLS
	+ 	coroutine_desc * src = TL_GET( this_coroutine );

	  	// not resuming self ?
	  	…
	  	// set last resumer
	  	dst->last = src;
	- 	} // if
	+ 	}

	  // always done for performance testing
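The recurring change above caches the thread-local this_coroutine pointer in a local instead of re-reading it on every use. A minimal C sketch of the pattern, with hypothetical names (kernel_tls, coro_switch) standing in for the libcfa ones:

	#include <stddef.h>

	struct coroutine_desc;                         /* opaque for the sketch */

	static _Thread_local struct {
		struct coroutine_desc * this_coroutine;
	} kernel_tls;

	#define TL_GET( member ) kernel_tls.member

	/* stand-in for the real context switch */
	static void coro_switch( struct coroutine_desc * from, struct coroutine_desc * to ) { (void)from; (void)to; }

	static inline void resume_sketch( struct coroutine_desc * dst ) {
		/* read the TLS slot once and reuse the local copy: if the user thread
		   is preempted and migrates to another kernel thread, this_coroutine
		   migrates with it, so the cached value stays in sync with the TLS */
		struct coroutine_desc * src = TL_GET( this_coroutine );

		if ( src != dst ) {                        /* not resuming self */
			coro_switch( src, dst );
		}
	}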
src/libcfa/concurrency/coroutine.c
(r10cfad9 → r14a61b5)

	  // Wrapper for co
	  void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
	+ 	// Safety note : This could cause some false positives due to preemption
	  	verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
	  	disable_interrupts();
	  	…
	  	// set new coroutine that task is executing
	- 	TL_SET( this_coroutine, dst );
	+ 	kernelTLS.this_coroutine = dst;

	  	// context switch to specified coroutine
	  	…
	  	enable_interrupts( __cfaabi_dbg_ctx );
	+ 	// Safety note : This could cause some false positives due to preemption
	  	verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate );
	  } //ctxSwitchDirect
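The switch from TL_SET to a plain assignment works because the write sits between disable_interrupts() and the context switch, so nothing can observe the TLS block while it is being updated. A rough, self-contained C sketch of that bracket; every name here (tls, raw_ctx_switch, the interrupt helpers) is a stand-in, not the libcfa API:

	#include <stddef.h>

	struct coroutine_desc;

	static _Thread_local struct {
		struct coroutine_desc * this_coroutine;
		int disable_count;
	} tls;

	static void disable_interrupts( void ) { tls.disable_count++; }
	static void enable_interrupts ( void ) { tls.disable_count--; }
	static void raw_ctx_switch( struct coroutine_desc * from, struct coroutine_desc * to ) { (void)from; (void)to; }

	static void ctx_switch_sketch( struct coroutine_desc * src, struct coroutine_desc * dst ) {
		disable_interrupts();          /* no preemption while TLS is being updated */

		tls.this_coroutine = dst;      /* plain write is enough: nothing else can run
		                                  on this kernel thread until interrupts are
		                                  re-enabled after the switch back */
		raw_ctx_switch( src, dst );

		enable_interrupts();
	}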
src/libcfa/concurrency/invoke.h
(r10cfad9 → r14a61b5)

	  #include "bits/locks.h"

	- #define TL_GET( member ) kernelThreadData.member
	- #define TL_SET( member, value ) kernelThreadData.member = value;
	+ #define TL_GET( member ) kernelTLS.member
	+ #define TL_SET( member, value ) kernelTLS.member = value;

	  #ifdef __cforall
	  …
	  		volatile bool in_progress;
	  	} preemption_state;
	- } kernelThreadData;
	+ } kernelTLS;
	  }

	  static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
	- static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread); }
	- static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
	+ static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
	+ // static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
	  #endif
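To make the rename concrete, here is a self-contained C approximation of the per-kernel-thread block and its accessors. The field set mirrors the header above, but the struct tags and the kernel_tls name are illustrative, not the actual libcfa declarations:

	#include <stdbool.h>
	#include <stddef.h>

	struct coroutine;  struct thread;  struct processor;

	static _Thread_local struct {
		struct coroutine * this_coroutine;
		struct thread    * this_thread;
		struct processor * this_processor;
		struct {
			volatile bool           enabled;
			volatile unsigned short disable_count;
			volatile bool           in_progress;
		} preemption_state;
	} kernel_tls;

	/* accessors expand to a field of the TLS block, mirroring the new macros */
	#define TL_GET( member )        kernel_tls.member
	#define TL_SET( member, value ) ( kernel_tls.member = (value) )

	/* this_coroutine / this_thread follow the user thread when it migrates, so
	   reading them through TL_GET is safe even if preemption occurs in between;
	   this_processor does not, which is why active_processor() is now disabled. */
	static inline struct coroutine * active_coroutine( void ) { return TL_GET( this_coroutine ); }
	static inline struct thread    * active_thread   ( void ) { return TL_GET( this_thread ); }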
src/libcfa/concurrency/kernel
(r10cfad9 → r14a61b5)

	  	// Preemption rate on this cluster
	  	Duration preemption_rate;
	+
	+ 	// List of idle processors
	+ 	// __dllist_t(struct processor) idles;
	  };
	  …
	  	bool pending_preemption;

	+ 	struct {
	+ 		pthread_mutex_t lock;
	+ 		pthread_cond_t cond;
	+ 	} idle;
	+
	  #ifdef __CFA_DEBUG__
	  	// Last function to enable preemption on this processor
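The new idle lock/cond pair is there to park a processor's kernel thread when it has no work, matching the commented-out halt()/wake() added to kernel.c below. A minimal C sketch of that pattern, written with an explicit predicate so spurious wake-ups are harmless; the processor struct and field names are simplified stand-ins for the real ones:

	#include <pthread.h>
	#include <stdbool.h>

	struct processor_sketch {
		pthread_mutex_t lock;
		pthread_cond_t  cond;
		bool            should_run;     /* predicate guarding against spurious wake-ups */
	};

	static void halt( struct processor_sketch * this ) {
		pthread_mutex_lock( &this->lock );
		while ( ! this->should_run ) {
			/* releases the lock while blocked, reacquires it before returning */
			pthread_cond_wait( &this->cond, &this->lock );
		}
		this->should_run = false;
		pthread_mutex_unlock( &this->lock );
	}

	static void wake( struct processor_sketch * this ) {
		pthread_mutex_lock( &this->lock );
		this->should_run = true;
		pthread_cond_signal( &this->cond );
		pthread_mutex_unlock( &this->lock );
	}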
src/libcfa/concurrency/kernel.c
r10cfad9 r14a61b5 56 56 // volatile thread_local unsigned short disable_preempt_count = 1; 57 57 58 thread_local struct KernelThreadData kernelT hreadData= {58 thread_local struct KernelThreadData kernelTLS = { 59 59 NULL, 60 60 NULL, … … 155 155 terminate(&this); 156 156 verify(this.do_terminate); 157 verify( TL_GET( this_processor )!= &this);157 verify( kernelTLS.this_processor != &this); 158 158 P( terminated ); 159 verify( TL_GET( this_processor )!= &this);159 verify( kernelTLS.this_processor != &this); 160 160 pthread_join( kernel_thread, NULL ); 161 161 } … … 196 196 if(readyThread) 197 197 { 198 verify( ! TL_GET( preemption_state ).enabled );198 verify( ! kernelTLS.preemption_state.enabled ); 199 199 200 200 runThread(this, readyThread); 201 201 202 verify( ! TL_GET( preemption_state ).enabled );202 verify( ! kernelTLS.preemption_state.enabled ); 203 203 204 204 //Some actions need to be taken from the kernel … … 221 221 } 222 222 223 // KERNEL ONLY 223 224 // runThread runs a thread by context switching 224 225 // from the processor coroutine to the target thread … … 228 229 coroutine_desc * thrd_cor = dst->curr_cor; 229 230 230 // Reset the terminating actions here231 // Reset the terminating actions here 231 232 this->finish.action_code = No_Action; 232 233 233 // Update global state234 TL_SET( this_thread, dst );234 // Update global state 235 kernelTLS.this_thread = dst; 235 236 236 237 // Context Switch to the thread … … 239 240 } 240 241 242 // KERNEL_ONLY 241 243 void returnToKernel() { 242 coroutine_desc * proc_cor = get_coroutine( TL_GET( this_processor )->runner);243 coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );244 coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 245 coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine; 244 246 ThreadCtxSwitch(thrd_cor, proc_cor); 245 247 } 246 248 249 // KERNEL_ONLY 247 250 // Once a thread has finished running, some of 248 251 // its final actions must be executed from the kernel 249 252 void finishRunning(processor * this) with( this->finish ) { 250 253 if( action_code == Release ) { 251 verify( ! TL_GET( preemption_state ).enabled );254 verify( ! kernelTLS.preemption_state.enabled ); 252 255 unlock( *lock ); 253 256 } … … 256 259 } 257 260 else if( action_code == Release_Schedule ) { 258 verify( ! TL_GET( preemption_state ).enabled );261 verify( ! kernelTLS.preemption_state.enabled ); 259 262 unlock( *lock ); 260 263 ScheduleThread( thrd ); 261 264 } 262 265 else if( action_code == Release_Multi ) { 263 verify( ! TL_GET( preemption_state ).enabled );266 verify( ! kernelTLS.preemption_state.enabled ); 264 267 for(int i = 0; i < lock_count; i++) { 265 268 unlock( *locks[i] ); … … 285 288 } 286 289 290 // KERNEL_ONLY 287 291 // Context invoker for processors 288 292 // This is the entry point for processors (kernel threads) … … 290 294 void * CtxInvokeProcessor(void * arg) { 291 295 processor * proc = (processor *) arg; 292 TL_SET( this_processor, proc );293 TL_SET( this_coroutine, NULL );294 TL_SET( this_thread, NULL );295 TL_GET( preemption_state ).[enabled, disable_count] = [false, 1];296 kernelTLS.this_processor = proc; 297 kernelTLS.this_coroutine = NULL; 298 kernelTLS.this_thread = NULL; 299 kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; 296 300 // SKULLDUGGERY: We want to create a context for the processor coroutine 297 301 // which is needed for the 2-step context switch. 
However, there is no reason … … 305 309 306 310 //Set global state 307 TL_SET( this_coroutine, get_coroutine(proc->runner));308 TL_SET( this_thread, NULL );311 kernelTLS.this_coroutine = get_coroutine(proc->runner); 312 kernelTLS.this_thread = NULL; 309 313 310 314 //We now have a proper context from which to schedule threads … … 333 337 } 334 338 339 // KERNEL_ONLY 335 340 void kernel_first_resume(processor * this) { 336 coroutine_desc * src = TL_GET( this_coroutine );341 coroutine_desc * src = kernelTLS.this_coroutine; 337 342 coroutine_desc * dst = get_coroutine(this->runner); 338 343 339 verify( ! TL_GET( preemption_state ).enabled );344 verify( ! kernelTLS.preemption_state.enabled ); 340 345 341 346 create_stack(&dst->stack, dst->stack.size); 342 347 CtxStart(&this->runner, CtxInvokeCoroutine); 343 348 344 verify( ! TL_GET( preemption_state ).enabled );349 verify( ! kernelTLS.preemption_state.enabled ); 345 350 346 351 dst->last = src; … … 351 356 352 357 // set new coroutine that task is executing 353 TL_SET( this_coroutine, dst );358 kernelTLS.this_coroutine = dst; 354 359 355 360 // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch. … … 368 373 src->state = Active; 369 374 370 verify( ! TL_GET( preemption_state ).enabled );375 verify( ! kernelTLS.preemption_state.enabled ); 371 376 } 372 377 373 378 //----------------------------------------------------------------------------- 374 379 // Scheduler routines 380 381 // KERNEL ONLY 375 382 void ScheduleThread( thread_desc * thrd ) { 376 // if( ! thrd ) return;377 383 verify( thrd ); 378 384 verify( thrd->self_cor.state != Halted ); 379 385 380 verify( ! TL_GET( preemption_state ).enabled );386 verify( ! kernelTLS.preemption_state.enabled ); 381 387 382 388 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); … … 388 394 } 389 395 390 verify( ! TL_GET( preemption_state ).enabled ); 391 } 392 396 verify( ! kernelTLS.preemption_state.enabled ); 397 } 398 399 // KERNEL ONLY 393 400 thread_desc * nextThread(cluster * this) with( *this ) { 394 verify( ! TL_GET( preemption_state ).enabled );401 verify( ! kernelTLS.preemption_state.enabled ); 395 402 lock( ready_queue_lock __cfaabi_dbg_ctx2 ); 396 403 thread_desc * head = pop_head( ready_queue ); 397 404 unlock( ready_queue_lock ); 398 verify( ! TL_GET( preemption_state ).enabled );405 verify( ! kernelTLS.preemption_state.enabled ); 399 406 return head; 400 407 } … … 402 409 void BlockInternal() { 403 410 disable_interrupts(); 404 verify( ! TL_GET( preemption_state ).enabled );411 verify( ! kernelTLS.preemption_state.enabled ); 405 412 returnToKernel(); 406 verify( ! TL_GET( preemption_state ).enabled );413 verify( ! kernelTLS.preemption_state.enabled ); 407 414 enable_interrupts( __cfaabi_dbg_ctx ); 408 415 } … … 410 417 void BlockInternal( __spinlock_t * lock ) { 411 418 disable_interrupts(); 412 with( * TL_GET( this_processor )) {419 with( *kernelTLS.this_processor ) { 413 420 finish.action_code = Release; 414 421 finish.lock = lock; 415 422 } 416 423 417 verify( ! TL_GET( preemption_state ).enabled );424 verify( ! preemption_state.enabled ); 418 425 returnToKernel(); 419 verify( ! TL_GET( preemption_state ).enabled );426 verify( ! 
preemption_state.enabled ); 420 427 421 428 enable_interrupts( __cfaabi_dbg_ctx ); … … 424 431 void BlockInternal( thread_desc * thrd ) { 425 432 disable_interrupts(); 426 with( * TL_GET( this_processor )) {433 with( * kernelTLS.this_processor ) { 427 434 finish.action_code = Schedule; 428 435 finish.thrd = thrd; 429 436 } 430 437 431 verify( ! TL_GET( preemption_state ).enabled );438 verify( ! kernelTLS.preemption_state.enabled ); 432 439 returnToKernel(); 433 verify( ! TL_GET( preemption_state ).enabled );440 verify( ! kernelTLS.preemption_state.enabled ); 434 441 435 442 enable_interrupts( __cfaabi_dbg_ctx ); … … 439 446 assert(thrd); 440 447 disable_interrupts(); 441 with( * TL_GET( this_processor )) {448 with( * kernelTLS.this_processor ) { 442 449 finish.action_code = Release_Schedule; 443 450 finish.lock = lock; … … 445 452 } 446 453 447 verify( ! TL_GET( preemption_state ).enabled );454 verify( ! kernelTLS.preemption_state.enabled ); 448 455 returnToKernel(); 449 verify( ! TL_GET( preemption_state ).enabled );456 verify( ! kernelTLS.preemption_state.enabled ); 450 457 451 458 enable_interrupts( __cfaabi_dbg_ctx ); … … 454 461 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 455 462 disable_interrupts(); 456 with( * TL_GET( this_processor )) {463 with( * kernelTLS.this_processor ) { 457 464 finish.action_code = Release_Multi; 458 465 finish.locks = locks; … … 460 467 } 461 468 462 verify( ! TL_GET( preemption_state ).enabled );469 verify( ! kernelTLS.preemption_state.enabled ); 463 470 returnToKernel(); 464 verify( ! TL_GET( preemption_state ).enabled );471 verify( ! kernelTLS.preemption_state.enabled ); 465 472 466 473 enable_interrupts( __cfaabi_dbg_ctx ); … … 469 476 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 470 477 disable_interrupts(); 471 with( * TL_GET( this_processor )) {478 with( *kernelTLS.this_processor ) { 472 479 finish.action_code = Release_Multi_Schedule; 473 480 finish.locks = locks; … … 477 484 } 478 485 479 verify( ! TL_GET( preemption_state ).enabled );486 verify( ! kernelTLS.preemption_state.enabled ); 480 487 returnToKernel(); 481 verify( ! TL_GET( preemption_state ).enabled );488 verify( ! kernelTLS.preemption_state.enabled ); 482 489 483 490 enable_interrupts( __cfaabi_dbg_ctx ); 484 491 } 485 492 493 // KERNEL ONLY 486 494 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 487 verify( ! TL_GET( preemption_state ).enabled );488 with( * TL_GET( this_processor )) {495 verify( ! kernelTLS.preemption_state.enabled ); 496 with( * kernelTLS.this_processor ) { 489 497 finish.action_code = thrd ? Release_Schedule : Release; 490 498 finish.lock = lock; … … 501 509 // Kernel boot procedures 502 510 void kernel_startup(void) { 503 verify( ! TL_GET( preemption_state ).enabled );511 verify( ! kernelTLS.preemption_state.enabled ); 504 512 __cfaabi_dbg_print_safe("Kernel : Starting\n"); 505 513 … … 547 555 548 556 //initialize the global state variables 549 TL_SET( this_processor, mainProcessor );550 TL_SET( this_thread, mainThread );551 TL_SET( this_coroutine, &mainThread->self_cor );557 kernelTLS.this_processor = mainProcessor; 558 kernelTLS.this_thread = mainThread; 559 kernelTLS.this_coroutine = &mainThread->self_cor; 552 560 553 561 // Enable preemption … … 561 569 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 562 570 // mainThread is on the ready queue when this call is made. 
563 kernel_first_resume( TL_GET( this_processor ));571 kernel_first_resume( kernelTLS.this_processor ); 564 572 565 573 … … 568 576 __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n"); 569 577 570 verify( ! TL_GET( preemption_state ).enabled );578 verify( ! kernelTLS.preemption_state.enabled ); 571 579 enable_interrupts( __cfaabi_dbg_ctx ); 572 580 verify( TL_GET( preemption_state ).enabled ); … … 578 586 verify( TL_GET( preemption_state ).enabled ); 579 587 disable_interrupts(); 580 verify( ! TL_GET( preemption_state ).enabled );588 verify( ! kernelTLS.preemption_state.enabled ); 581 589 582 590 // SKULLDUGGERY: Notify the mainProcessor it needs to terminates. … … 602 610 __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n"); 603 611 } 612 613 //============================================================================================= 614 // Kernel Quiescing 615 //============================================================================================= 616 617 // void halt(processor * this) with( this ) { 618 // pthread_mutex_lock( &idle.lock ); 619 620 621 622 // // SKULLDUGGERY: Even if spurious wake-up is a thing 623 // // spuriously waking up a kernel thread is not a big deal 624 // // if it is very rare. 625 // pthread_cond_wait( &idle.cond, &idle.lock); 626 // pthread_mutex_unlock( &idle.lock ); 627 // } 628 629 // void wake(processor * this) with( this ) { 630 // pthread_mutex_lock (&idle.lock); 631 // pthread_cond_signal (&idle.cond); 632 // pthread_mutex_unlock(&idle.lock); 633 // } 604 634 605 635 //============================================================================================= … … 633 663 } 634 664 635 return TL_GET( this_thread );665 return kernelTLS.this_thread; 636 666 } 637 667 … … 642 672 __cfaabi_dbg_bits_write( abort_text, len ); 643 673 644 if ( get_coroutine(thrd) != TL_GET( this_coroutine )) {645 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ));674 if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) { 675 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine ); 646 676 __cfaabi_dbg_bits_write( abort_text, len ); 647 677 } … … 652 682 653 683 int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) { 654 return get_coroutine( TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2;684 return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 4 : 2; 655 685 } 656 686 … … 682 712 if ( count < 0 ) { 683 713 // queue current task 684 append( waiting, (thread_desc *)TL_GET( this_thread ));714 append( waiting, kernelTLS.this_thread ); 685 715 686 716 // atomically release spin lock and block … … 742 772 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) { 743 773 this.prev_name = prev_name; 744 this.prev_thrd = TL_GET( this_thread );774 this.prev_thrd = kernelTLS.this_thread; 745 775 } 746 776 ) -
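Many of the kernel.c changes above touch the BlockInternal/finishRunning pair: the blocking side records a "finish action" on the current processor while interrupts are disabled, switches to the processor coroutine, and the kernel completes the action (for example, releasing a lock or scheduling another thread) only after the blocked thread is safely off its stack. A compressed, self-contained C sketch of that handshake; the enum, struct, and function names are illustrative only:

	#include <stddef.h>

	typedef enum { NO_ACTION, RELEASE, SCHEDULE } finish_code;

	struct spinlock { volatile int held; };
	struct thread_sk;

	/* per-processor record of what the kernel must do after the switch */
	static struct {
		finish_code        action_code;
		struct spinlock  * lock;
		struct thread_sk * thrd;
	} finish = { NO_ACTION, NULL, NULL };

	static void unlock( struct spinlock * l ) { l->held = 0; }
	static void schedule_thread( struct thread_sk * t ) { (void)t; }
	static void return_to_kernel( void ) { /* stand-in for the real context switch */ }

	/* blocking side: runs on the user thread, interrupts assumed disabled */
	static void block_internal_release( struct spinlock * lock ) {
		finish.action_code = RELEASE;
		finish.lock        = lock;
		return_to_kernel();            /* switch to the processor coroutine */
	}

	/* kernel side: runs after the switch, when the blocked thread's stack is idle */
	static void finish_running( void ) {
		if      ( finish.action_code == RELEASE  ) unlock( finish.lock );
		else if ( finish.action_code == SCHEDULE ) schedule_thread( finish.thrd );
		finish.action_code = NO_ACTION;
	}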
src/libcfa/concurrency/monitor.c
r10cfad9 r14a61b5 85 85 // Lock the monitor spinlock 86 86 lock( this->lock __cfaabi_dbg_ctx2 ); 87 thread_desc * thrd = TL_GET( this_thread ); 87 // Interrupts disable inside critical section 88 thread_desc * thrd = kernelTLS.this_thread; 88 89 89 90 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); … … 134 135 // Lock the monitor spinlock 135 136 lock( this->lock __cfaabi_dbg_ctx2 ); 136 thread_desc * thrd = TL_GET( this_thread ); 137 // Interrupts disable inside critical section 138 thread_desc * thrd = kernelTLS.this_thread; 137 139 138 140 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); … … 168 170 169 171 // Create the node specific to this wait operation 170 wait_ctx_primed( TL_GET( this_thread ), 0 )172 wait_ctx_primed( thrd, 0 ) 171 173 172 174 // Some one else has the monitor, wait for him to finish and then run … … 179 181 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 180 182 181 wait_ctx( TL_GET( this_thread ), 0 )183 wait_ctx( thrd, 0 ) 182 184 this->dtor_node = &waiter; 183 185 … … 199 201 lock( this->lock __cfaabi_dbg_ctx2 ); 200 202 201 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);202 203 verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );203 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 204 205 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 204 206 205 207 // Leaving a recursion level, decrement the counter … … 289 291 // Sorts monitors before entering 290 292 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) { 293 thread_desc * thrd = TL_GET( this_thread ); 294 291 295 // Store current array 292 296 this.m = m; … … 297 301 298 302 // Save previous thread context 299 this.prev = TL_GET( this_thread )->monitors;303 this.prev = thrd->monitors; 300 304 301 305 // Update thread context (needed for conditions) 302 ( TL_GET( this_thread )->monitors){m, count, func};306 (thrd->monitors){m, count, func}; 303 307 304 308 // __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count); … … 328 332 // Sorts monitors before entering 329 333 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) { 334 // optimization 335 thread_desc * thrd = TL_GET( this_thread ); 336 330 337 // Store current array 331 338 this.m = *m; 332 339 333 340 // Save previous thread context 334 this.prev = TL_GET( this_thread )->monitors;341 this.prev = thrd->monitors; 335 342 336 343 // Update thread context (needed for conditions) 337 ( TL_GET( this_thread )->monitors){m, 1, func};344 (thrd->monitors){m, 1, func}; 338 345 339 346 __enter_monitor_dtor( this.m, func ); … … 566 573 567 574 // Create the node specific to this wait operation 568 wait_ctx_primed( TL_GET( this_thread ), 0 );575 wait_ctx_primed( kernelTLS.this_thread, 0 ); 569 576 570 577 // Save monitor states … … 612 619 613 620 // Create the node specific to this wait operation 614 wait_ctx_primed( TL_GET( this_thread ), 0 );621 wait_ctx_primed( kernelTLS.this_thread, 0 ); 615 622 616 623 monitor_save; … … 618 625 619 626 for( __lock_size_t i = 0; i < count; i++) { 620 verify( monitors[i]->owner == TL_GET( this_thread ));627 verify( monitors[i]->owner == 
kernelTLS.this_thread ); 621 628 } 622 629 -
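The monitor.c changes read kernelTLS.this_thread once per operation while the monitor spinlock is held. As a rough C illustration of why that single read is all the entry protocol needs, here is a simplified enter path (a sketch only: the real code also builds wait contexts, queues blocked threads, and handles destructors):

	#include <stddef.h>

	struct thread_sk;

	struct monitor_sketch {
		/* spinlock omitted: assume the caller already holds it with interrupts off */
		struct thread_sk * owner;
		int                recursion;
	};

	/* returns 1 if the caller now owns the monitor, 0 if it must block */
	static int enter_monitor_sketch( struct monitor_sketch * this, struct thread_sk * thrd ) {
		if ( this->owner == NULL ) {          /* free: take ownership */
			this->owner     = thrd;
			this->recursion = 1;
			return 1;
		} else if ( this->owner == thrd ) {   /* already owned by this thread: recurse */
			this->recursion += 1;
			return 1;
		}
		return 0;                             /* owned by someone else: caller blocks */
	}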
src/libcfa/concurrency/preemption.c
r10cfad9 r14a61b5 234 234 } 235 235 236 236 // KERNEL ONLY 237 237 // Check if a CtxSwitch signal handler shoud defer 238 238 // If true : preemption is safe 239 239 // If false : preemption is unsafe and marked as pending 240 240 static inline bool preemption_ready() { 241 bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe 242 TL_GET( this_processor )->pending_preemption = !ready; // Adjust the pending flag accordingly 241 // Check if preemption is safe 242 bool ready = kernelTLS.preemption_state.enabled && ! kernelTLS.preemption_state.in_progress; 243 244 // Adjust the pending flag accordingly 245 kernelTLS.this_processor->pending_preemption = !ready; 243 246 return ready; 244 247 } … … 254 257 255 258 // Start with preemption disabled until ready 256 TL_GET( preemption_state ).enabled = false;257 TL_GET( preemption_state ).disable_count = 1;259 kernelTLS.preemption_state.enabled = false; 260 kernelTLS.preemption_state.disable_count = 1; 258 261 259 262 // Initialize the event kernel … … 320 323 // before the kernel thread has even started running. When that happens an iterrupt 321 324 // we a null 'this_processor' will be caught, just ignore it. 322 if(! TL_GET( this_processor )) return;325 if(! kernelTLS.this_processor ) return; 323 326 324 327 choose(sfp->si_value.sival_int) { 325 328 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 326 case PREEMPT_TERMINATE: verify( TL_GET( this_processor )->do_terminate);329 case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate); 327 330 default: 328 331 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 332 335 if( !preemption_ready() ) { return; } 333 336 334 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", TL_GET( this_processor ), TL_GET( this_thread ) ); 335 336 TL_GET( preemption_state ).in_progress = true; // Sync flag : prevent recursive calls to the signal handler 337 signal_unblock( SIGUSR1 ); // We are about to CtxSwitch out of the signal handler, let other handlers in 338 TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag 337 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread ); 338 339 // Sync flag : prevent recursive calls to the signal handler 340 kernelTLS.preemption_state.in_progress = true; 341 342 // We are about to CtxSwitch out of the signal handler, let other handlers in 343 signal_unblock( SIGUSR1 ); 344 345 // TODO: this should go in finish action 346 // Clear the in progress flag 347 kernelTLS.preemption_state.in_progress = false; 339 348 340 349 // Preemption can occur here 341 350 342 BlockInternal( (thread_desc*)TL_GET( this_thread )); // Do the actual CtxSwitch351 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch 343 352 } 344 353 … … 409 418 410 419 void __cfaabi_check_preemption() { 411 bool ready = TL_GET( preemption_state ).enabled;420 bool ready = kernelTLS.preemption_state.enabled; 412 421 if(!ready) { abort("Preemption should be ready"); } 413 422 -
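The rewritten preemption_ready() separates the safety check from the pending-flag update. A self-contained C sketch of the same decision, with the TLS block and processor reduced to just the fields involved (names are illustrative):

	#include <stdbool.h>

	struct processor_sk { volatile bool pending_preemption; };

	static _Thread_local struct {
		struct {
			volatile bool enabled;
			volatile bool in_progress;
		} preemption_state;
		struct processor_sk * this_processor;
	} tls;

	/* If true : preemption is safe.
	   If false: preemption is unsafe and recorded as pending, so it can be
	             delivered as soon as preemption is re-enabled. */
	static inline bool preemption_ready( void ) {
		// Check if preemption is safe
		bool ready = tls.preemption_state.enabled && ! tls.preemption_state.in_progress;

		// Adjust the pending flag accordingly
		tls.this_processor->pending_preemption = ! ready;
		return ready;
	}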
src/libcfa/concurrency/thread.c
(r10cfad9 → r14a61b5)

	  	disable_interrupts();
	  	create_stack(&thrd_c->stack, thrd_c->stack.size);
	- 	TL_SET( this_coroutine, thrd_c );
	+ 	kernelTLS.this_coroutine = thrd_c;
	  	CtxStart(&this, CtxInvokeThread);
	  	assert( thrd_c->last->stack.context );
	  	…

	  extern "C" {
	+ 	// KERNEL ONLY
	  	void __finish_creation(void) {
	- 		coroutine_desc* thrd_c = TL_GET( this_coroutine );
	+ 		coroutine_desc* thrd_c = kernelTLS.this_coroutine;
	  		ThreadCtxSwitch( thrd_c, thrd_c->last );
	  	}
	  	…

	  void yield( void ) {
	- 	verify( TL_GET( preemption_state ).enabled );
	+ 	// Safety note : This could cause some false positives due to preemption
	+ 	verify( TL_GET( preemption_state ).enabled );
	  	BlockInternal( TL_GET( this_thread ) );
	- 	verify( TL_GET( preemption_state ).enabled );
	+ 	// Safety note : This could cause some false positives due to preemption
	+ 	verify( TL_GET( preemption_state ).enabled );
	  }
	  …
	  }

	+ // KERNEL ONLY
	  void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
	  	// set state of current coroutine to inactive
	  	…
	  	// set new coroutine that the processor is executing
	  	// and context switch to it
	- 	TL_SET( this_coroutine, dst );
	+ 	kernelTLS.this_coroutine = dst;
	  	assert( src->stack.context );
	  	CtxSwitch( src->stack.context, dst->stack.context );
	- 	TL_SET( this_coroutine, src );
	+ 	kernelTLS.this_coroutine = src;

	  	// set state of new coroutine to active
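yield() now carries the same safety note about false positives under preemption; functionally it just parks the current thread back on the ready queue and lets the scheduler pick the next one. A tiny, hypothetical C sketch of that shape, where block_internal and the TLS fields are stand-ins for the libcfa calls:

	#include <assert.h>
	#include <stdbool.h>

	struct thread_sk;

	static _Thread_local struct {
		struct thread_sk * this_thread;
		bool               preemption_enabled;
	} tls;

	static void block_internal( struct thread_sk * t ) { (void)t; /* requeues t and switches away */ }

	static void yield_sketch( void ) {
		assert( tls.preemption_enabled );    /* may mis-fire if preempted mid-check */
		block_internal( tls.this_thread );   /* requeue self and let another thread run */
		assert( tls.preemption_enabled );
	}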