Changeset 1c273d0
- Timestamp: Jun 23, 2017, 10:12:04 AM (7 years ago)
- Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children: d43cd01
- Parents: aa3d77b
- Location: src
- Files: 14 edited
src/libcfa/concurrency/alarm.c
 	clock_gettime( CLOCK_REALTIME, &curr );
 	__cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Kernel : current time is %lu\n", curr_time );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
 	return curr_time;
 }
 
 void __kernel_set_timer( __cfa_time_t alarm ) {
-
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
-
+	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm );
 	itimerval val;
 	val.it_value.tv_sec = alarm / TIMEGRAN;		// seconds
…
 	disable_interrupts();
 	verify( !systemProcessor->pending_alarm );
-	lock( &systemProcessor->alarm_lock );
+	lock( &systemProcessor->alarm_lock, __PRETTY_FUNCTION__ );
 	{
 		verify( validate( &systemProcessor->alarms ) );
…
 
 void unregister_self( alarm_node_t * this ) {
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Kernel : unregister %p start\n", this );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	// LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this );
 	disable_interrupts();
-	lock( &systemProcessor->alarm_lock );
+	lock( &systemProcessor->alarm_lock, __PRETTY_FUNCTION__ );
 	{
 		verify( validate( &systemProcessor->alarms ) );
…
 	disable_interrupts();
 	this->set = false;
-	LIB_DEBUG_DO(
-		len = snprintf( text, 256, "Kernel : unregister %p end\n", this );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	// LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this );
 }
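The repeated snprintf-into-a-stack-buffer pattern removed above is folded into the new LIB_DEBUG_PRINT_BUFFER_DECL / LIB_DEBUG_PRINT_BUFFER_LOCAL macros. Their definitions are not part of this changeset; the sketch below is a plausible expansion inferred purely from the code they replace (buffer and length names are assumptions, and the real macros presumably compile away outside debug builds):

// Sketch only: DECL declares the scratch buffer and prints;
// LOCAL reuses a buffer declared earlier in the same scope.
#include <stdio.h>
#include <unistd.h>

#define LIB_DEBUG_PRINT_BUFFER_DECL( fd, fmt, ... )                  \
	char __dbg_text[256];                                            \
	int __dbg_len = snprintf( __dbg_text, 256, fmt, ##__VA_ARGS__ ); \
	write( fd, __dbg_text, __dbg_len )

#define LIB_DEBUG_PRINT_BUFFER_LOCAL( fd, fmt, ... )                 \
	__dbg_len = snprintf( __dbg_text, 256, fmt, ##__VA_ARGS__ );     \
	write( fd, __dbg_text, __dbg_len )

int main() {
	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", 42ul );
	LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "second message reuses the buffer\n" );
	return 0;
}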
src/libcfa/concurrency/coroutine
 
 // Get current coroutine
-coroutine_desc * this_coroutine(void);
+extern volatile thread_local coroutine_desc * this_coroutine;
 
 // Private wrappers for context switch and stack creation
…
 // Suspend implementation inlined for performance
 static inline void suspend() {
-	coroutine_desc * src = this_coroutine();	// optimization
+	coroutine_desc * src = this_coroutine;		// optimization
 
 	assertf( src->last != 0,
…
 forall(dtype T | is_coroutine(T))
 static inline void resume(T * cor) {
-	coroutine_desc * src = this_coroutine();	// optimization
+	coroutine_desc * src = this_coroutine;		// optimization
 	coroutine_desc * dst = get_coroutine(cor);
…
 
 static inline void resume(coroutine_desc * dst) {
-	coroutine_desc * src = this_coroutine();	// optimization
+	coroutine_desc * src = this_coroutine;		// optimization
 
 	// not resuming self ?
src/libcfa/concurrency/coroutine.c
 
 	// set state of current coroutine to inactive
-	src->state = Inactive;
+	src->state = src->state == Halted ? Halted : Inactive;
 
 	// set new coroutine that task is executing
-	this_processor->current_coroutine = dst;
+	this_coroutine = dst;
 
 	// context switch to specified coroutine
+	assert( src->stack.context );
 	CtxSwitch( src->stack.context, dst->stack.context );
 	// when CtxSwitch returns we are back in the src coroutine
src/libcfa/concurrency/invoke.c
 
 extern void __suspend_internal(void);
-extern void __leave_monitor_desc( struct monitor_desc * this );
+extern void __leave_thread_monitor( struct thread_desc * this );
 extern void disable_interrupts();
 extern void enable_interrupts( const char * );
 
 void CtxInvokeCoroutine(
-	void (*main)(void *), 
-	struct coroutine_desc *(*get_coroutine)(void *), 
+	void (*main)(void *),
+	struct coroutine_desc *(*get_coroutine)(void *),
 	void *this
 ) {
…
 
 void CtxInvokeThread(
-	void (*dtor)(void *), 
-	void (*main)(void *), 
-	struct thread_desc *(*get_thread)(void *), 
+	void (*dtor)(void *),
+	void (*main)(void *),
+	struct thread_desc *(*get_thread)(void *),
 	void *this
 ) {
+	// First suspend, once the thread arrives here,
+	// the function pointer to main can be invalidated without risk
 	__suspend_internal();
 
+	// Fetch the thread handle from the user defined thread structure
 	struct thread_desc* thrd = get_thread( this );
-	struct coroutine_desc* cor = &thrd->cor;
-	struct monitor_desc* mon = &thrd->mon;
-	cor->state = Active;
 
+	// Officially start the thread by enabling preemption
 	enable_interrupts( __PRETTY_FUNCTION__ );
 
-	// LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this);
+	// Call the main of the thread
 	main( this );
 
-	disable_interrupts();
-	__leave_monitor_desc( mon );
+	// To exit a thread we must :
+	// 1 - Mark it as halted
+	// 2 - Leave its monitor
+	// 3 - Disable the interupts
+	// The order of these 3 operations is very important
+	__leave_thread_monitor( thrd );
 
 	//Final suspend, should never return
…
 
 void CtxStart(
-	void (*main)(void *), 
-	struct coroutine_desc *(*get_coroutine)(void *), 
-	void *this, 
+	void (*main)(void *),
+	struct coroutine_desc *(*get_coroutine)(void *),
+	void *this,
 	void (*invoke)(void *)
 ) {
…
 	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke;
 	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
-	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
+	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
 
 #elif defined( __x86_64__ )
…
 	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke;
 	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520
-	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7 
+	((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F;  //Vol. 1 8-7
 #else
 	#error Only __i386__ and __x86_64__ is supported for threads in cfa
src/libcfa/concurrency/invoke.h
 
 struct spinlock {
 	volatile int lock;
+	#ifdef __CFA_DEBUG__
+		const char * prev;
+	#endif
 };
 
…
 	struct __thread_queue_t entry_queue;		// queue of threads that are blocked waiting for the monitor
 	struct __condition_stack_t signal_stack;	// stack of conditions to run next once we exit the monitor
-	struct monitor_desc * stack_owner;		// if bulk acquiring was used we need to synchronize signals with an other monitor
 	unsigned int recursion;				// monitor routines can be called recursively, we need to keep track of that
 };
src/libcfa/concurrency/kernel
 
 //-----------------------------------------------------------------------------
 // Locks
-bool try_lock( spinlock * );
-void lock( spinlock * );
+bool try_lock( spinlock *
+	#ifdef __CFA_DEBUG__
+		, const char * caller
+	#endif
+);
+
+void lock( spinlock *
+	#ifdef __CFA_DEBUG__
+		, const char * caller
+	#endif
+);
+
 void unlock( spinlock * );
 
…
 	struct processorCtx_t * runner;
 	cluster * cltr;
-	coroutine_desc * current_coroutine;
-	thread_desc * current_thread;
 	pthread_t kernel_thread;
 
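With the prev field added to spinlock in invoke.h above, every acquisition in debug builds records which function last took the lock, so a stuck or double-acquired lock can be traced from a debugger. A standalone C sketch of the idea (types simplified; the real spinlock and lock()/unlock() live in libcfa, and the changeset passes __PRETTY_FUNCTION__ explicitly at each call site):

#include <stdio.h>

typedef struct {
	volatile int lock;
	const char * prev;   // debug: who last acquired this lock
} spinlock;

static void lock( spinlock * this, const char * caller ) {
	while ( __sync_lock_test_and_set( &this->lock, 1 ) ) {}   // spin until acquired
	this->prev = caller;                                      // record the acquirer
}

static void unlock( spinlock * this ) {
	__sync_lock_release( &this->lock );
}

static spinlock ready_lock;

int main() {
	lock( &ready_lock, __PRETTY_FUNCTION__ );
	printf( "last taken by: %s\n", ready_lock.prev );         // prints "main"
	unlock( &ready_lock );
	return 0;
}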
src/libcfa/concurrency/kernel.c
 
 volatile thread_local processor * this_processor;
-volatile thread_local unsigned short disable_preempt_count;
-
-coroutine_desc * this_coroutine(void) {
-	return this_processor->current_coroutine;
-}
-
-thread_desc * this_thread(void) {
-	return this_processor->current_thread;
-}
+volatile thread_local coroutine_desc * this_coroutine;
+volatile thread_local thread_desc * this_thread;
+volatile thread_local unsigned short disable_preempt_count = 1;
 
 //-----------------------------------------------------------------------------
 // Main thread construction
 struct current_stack_info_t {
-	machine_context_t ctx; 
+	machine_context_t ctx;
 	unsigned int size;	// size of stack
 	void *base;		// base of stack
…
 
 void ?{}( coroutine_desc * this, current_stack_info_t * info) {
-	(&this->stack){ info }; 
+	(&this->stack){ info };
 	this->name = "Main Thread";
 	this->errno_ = 0;
…
 void ?{}(processor * this, cluster * cltr) {
 	this->cltr = cltr;
-	this->current_coroutine = NULL;
-	this->current_thread = NULL;
 	(&this->terminated){};
 	this->is_terminated = false;
…
 void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
 	this->cltr = cltr;
-	this->current_coroutine = NULL;
-	this->current_thread = NULL;
 	(&this->terminated){};
 	this->is_terminated = false;
…
 
 void ^?{}(cluster * this) {
-	
+
 }
 
…
 
 	thread_desc * readyThread = NULL;
-	for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ ) 
+	for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
 	{
 		readyThread = nextThread( this->cltr );
…
 }
 
-// runThread runs a thread by context switching 
-// from the processor coroutine to the target thread 
+// runThread runs a thread by context switching
+// from the processor coroutine to the target thread
 void runThread(processor * this, thread_desc * dst) {
 	coroutine_desc * proc_cor = get_coroutine(this->runner);
 	coroutine_desc * thrd_cor = get_coroutine(dst);
 
 	//Reset the terminating actions here
 	this->finish.action_code = No_Action;
 
 	//Update global state
-	this->current_thread = dst;
+	this_thread = dst;
 
 	// Context Switch to the thread
…
 }
 
-// Once a thread has finished running, some of 
+// Once a thread has finished running, some of
 // its final actions must be executed from the kernel
 void finishRunning(processor * this) {
…
 	}
 	else if( this->finish.action_code == Release_Schedule ) {
-		unlock( this->finish.lock ); 
+		unlock( this->finish.lock );
 		ScheduleThread( this->finish.thrd );
 	}
…
 	processor * proc = (processor *) arg;
 	this_processor = proc;
+	this_coroutine = NULL;
+	this_thread = NULL;
 	disable_preempt_count = 1;
 	// SKULLDUGGERY: We want to create a context for the processor coroutine
 	// which is needed for the 2-step context switch. However, there is no reason
-	// to waste the perfectly valid stack create by pthread. 
+	// to waste the perfectly valid stack create by pthread.
 	current_stack_info_t info;
 	machine_context_t ctx;
…
 
 	//Set global state
-	proc->current_coroutine = &proc->runner->__cor;
-	proc->current_thread = NULL;
+	this_coroutine = &proc->runner->__cor;
+	this_thread = NULL;
 
 	//We now have a proper context from which to schedule threads
 	LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);
 
-	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't 
-	// resume it to start it like it normally would, it will just context switch 
-	// back to here. Instead directly call the main since we already are on the 
+	// SKULLDUGGERY: Since the coroutine doesn't have its own stack, we can't
+	// resume it to start it like it normally would, it will just context switch
+	// back to here. Instead directly call the main since we already are on the
 	// appropriate stack.
 	proc_cor_storage.__cor.state = Active;
…
 
 	// Main routine of the core returned, the core is now fully terminated
-	LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner); 
+	LIB_DEBUG_PRINT_SAFE("Kernel : core %p main ended (%p)\n", proc, proc->runner);
 
 	return NULL;
…
 	} // if
 
-	LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this); 
+	LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
 }
 
…
 // Scheduler routines
 void ScheduleThread( thread_desc * thrd ) {
-	if( !thrd ) return;
+	// if( !thrd ) return;
+	assert( thrd );
+	assert( thrd->cor.state != Halted );
+
+	verify( disable_preempt_count > 0 );
 
 	verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
 
-	lock( &systemProcessor->proc.cltr->lock );
+	lock( &systemProcessor->proc.cltr->lock, __PRETTY_FUNCTION__ );
 	append( &systemProcessor->proc.cltr->ready_queue, thrd );
 	unlock( &systemProcessor->proc.cltr->lock );
+
+	verify( disable_preempt_count > 0 );
 }
 
 thread_desc * nextThread(cluster * this) {
-	lock( &this->lock );
+	verify( disable_preempt_count > 0 );
+	lock( &this->lock, __PRETTY_FUNCTION__ );
 	thread_desc * head = pop_head( &this->ready_queue );
 	unlock( &this->lock );
+	verify( disable_preempt_count > 0 );
 	return head;
 }
…
 void BlockInternal( thread_desc * thrd ) {
 	disable_interrupts();
+	assert( thrd->cor.state != Halted );
 	this_processor->finish.action_code = Schedule;
 	this_processor->finish.thrd = thrd;
…
 // Kernel boot procedures
 void kernel_startup(void) {
-	LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n"); 
+	LIB_DEBUG_PRINT_SAFE("Kernel : Starting\n");
 
 	// Start by initializing the main thread
-	// SKULLDUGGERY: the mainThread steals the process main thread 
+	// SKULLDUGGERY: the mainThread steals the process main thread
 	// which will then be scheduled by the systemProcessor normally
 	mainThread = (thread_desc *)&mainThread_storage;
…
 	systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx_storage };
 
-	// Add the main thread to the ready queue 
+	// Add the main thread to the ready queue
 	// once resume is called on systemProcessor->runner the mainThread needs to be scheduled like any normal thread
 	ScheduleThread(mainThread);
…
 	//initialize the global state variables
 	this_processor = &systemProcessor->proc;
-	this_processor->current_thread = mainThread;
-	this_processor->current_coroutine = &mainThread->cor;
+	this_thread = mainThread;
+	this_coroutine = &mainThread->cor;
 	disable_preempt_count = 1;
…
 	// SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
 	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
-	// mainThread is on the ready queue when this call is made. 
+	// mainThread is on the ready queue when this call is made.
 	resume( systemProcessor->proc.runner );
…
 	^(mainThread){};
 
-	LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n"); 
+	LIB_DEBUG_PRINT_SAFE("Kernel : Shutdown complete\n");
 }
…
 	// abort cannot be recursively entered by the same or different processors because all signal handlers return when
 	// the globalAbort flag is true.
-	lock( &kernel_abort_lock );
+	lock( &kernel_abort_lock, __PRETTY_FUNCTION__ );
 
 	// first task to abort ?
…
 		kernel_abort_called = true;
 		unlock( &kernel_abort_lock );
-	} 
+	}
 	else {
 		unlock( &kernel_abort_lock );
 
 		sigset_t mask;
 		sigemptyset( &mask );
…
 		sigaddset( &mask, SIGUSR1 );	// block SIGUSR1 signals
 		sigsuspend( &mask );		// block the processor to prevent further damage during abort
-		_exit( EXIT_FAILURE );		// if processor unblocks before it is killed, terminate it 
+		_exit( EXIT_FAILURE );		// if processor unblocks before it is killed, terminate it
 	}
 
-	return this_thread();
+	return this_thread;
 }
…
 	__lib_debug_write( STDERR_FILENO, abort_text, len );
 
-	if ( thrd != this_coroutine() ) {
-		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine()->name, this_coroutine() );
+	if ( thrd != this_coroutine ) {
+		len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
 		__lib_debug_write( STDERR_FILENO, abort_text, len );
-	} 
+	}
 	else {
 		__lib_debug_write( STDERR_FILENO, ".\n", 2 );
…
 extern "C" {
 	void __lib_debug_acquire() {
-		lock(&kernel_debug_lock);
+		lock(&kernel_debug_lock, __PRETTY_FUNCTION__);
 	}
…
 }
 
-bool try_lock( spinlock * this ) {
-	return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
-}
-
-void lock( spinlock * this ) {
+bool try_lock( spinlock * this, const char * caller ) {
+	bool ret = this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
+	this->prev = caller;
+	return ret;
+}
+
+void lock( spinlock * this, const char * caller ) {
 	for ( unsigned int i = 1;; i += 1 ) {
 		if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
 	}
+	this->prev = caller;
 }
…
 
 void wait( signal_once * this ) {
-	lock( &this->lock );
+	lock( &this->lock, __PRETTY_FUNCTION__ );
 	if( !this->cond ) {
-		append( &this->blocked, this_thread() );
+		append( &this->blocked, (thread_desc*)this_thread );
 		BlockInternal( &this->lock );
 	}
…
 
 void signal( signal_once * this ) {
-	lock( &this->lock );
+	lock( &this->lock, __PRETTY_FUNCTION__ );
 	{
 		this->cond = true;
…
 	}
 	head->next = NULL;
-	} 
+	}
 	return head;
 }
…
 		this->top = top->next;
 		top->next = NULL;
-	} 
+	}
 	return top;
 }
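The recurring substitution in this file, this_processor->current_thread becoming the thread-local this_thread (and likewise for this_coroutine), replaces a function call plus a pointer chase with a single thread-local load, and the volatile qualifier keeps the value honest when a preemption signal handler reads it. A standalone C illustration of the pattern (demo names only, not the libcfa types; __thread is the GCC spelling of thread-local storage):

#include <stdio.h>

typedef struct task { const char * name; } task;

// One "current task" pointer per kernel thread. volatile because an
// asynchronous signal handler (preemption) may read it at any point.
static __thread task * volatile current_task = NULL;

static void run( task * t ) {
	current_task = t;               // one store; no this_processor indirection
	printf( "running %s\n", current_task->name );
}

int main() {
	task t = { "worker" };
	run( &t );
	return 0;
}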
src/libcfa/concurrency/kernel_private.h
 
 //-----------------------------------------------------------------------------
 // Scheduler
+extern "C" {
+	void disable_interrupts();
+	void enable_interrupts_noRF();
+	void enable_interrupts( const char * );
+}
+
 void ScheduleThread( thread_desc * );
+static inline void WakeThread( thread_desc * thrd ) {
+	if( !thrd ) return;
+
+	disable_interrupts();
+	ScheduleThread( thrd );
+	enable_interrupts( __PRETTY_FUNCTION__ );
+}
 thread_desc * nextThread(cluster * this);
 
…
 extern system_proc_t * systemProcessor;
 extern volatile thread_local processor * this_processor;
+extern volatile thread_local coroutine_desc * this_coroutine;
+extern volatile thread_local thread_desc * this_thread;
 extern volatile thread_local unsigned short disable_preempt_count;
-
-extern "C" {
-	void disable_interrupts();
-	void enable_interrupts_noRF();
-	void enable_interrupts( const char * );
-}
 
 //-----------------------------------------------------------------------------
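ScheduleThread now asserts a non-null thread and that preemption is disabled, so call sites that may hold a null handle and run with interrupts enabled go through the new WakeThread wrapper instead (monitor.c below is the first such site). A self-contained C analogue of the pattern, with the guard calls stubbed out for the demo:

#include <stdio.h>

typedef struct thread_desc { const char * name; } thread_desc;

static int preempt_disabled = 0;
static void disable_interrupts( void ) { preempt_disabled += 1; }
static void enable_interrupts ( void ) { preempt_disabled -= 1; }

// The "real" operation: requires a non-NULL thread and the guard held.
static void ScheduleThread( thread_desc * thrd ) {
	printf( "scheduling %s (guard=%d)\n", thrd->name, preempt_disabled );
}

// The wrapper: tolerates NULL and brackets the enqueue with the guard.
static void WakeThread( thread_desc * thrd ) {
	if ( !thrd ) return;     // wake-up sites may legitimately have no thread
	disable_interrupts();
	ScheduleThread( thrd );
	enable_interrupts();
}

int main() {
	thread_desc t = { "new_owner" };
	WakeThread( NULL );      // no-op
	WakeThread( &t );
	return 0;
}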
src/libcfa/concurrency/monitor
 
 static inline void ?{}(monitor_desc * this) {
 	this->owner = NULL;
-	this->stack_owner = NULL;
 	this->recursion = 0;
 }
src/libcfa/concurrency/monitor.c
 
 extern "C" {
-void __enter_monitor_desc(monitor_desc * this) {
-	lock( &this->lock );
-	thread_desc * thrd = this_thread();
-
-	LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
+void __enter_monitor_desc( monitor_desc * this ) {
+	lock( &this->lock, __PRETTY_FUNCTION__ );
+	thread_desc * thrd = this_thread;
+
+	// LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
 
 	if( !this->owner ) {
…
 		//Some one else has the monitor, wait in line for it
 		append( &this->entry_queue, thrd );
-		LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
+		// LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
 		BlockInternal( &this->lock );
 
…
 // leave pseudo code :
 // TODO
-void __leave_monitor_desc(monitor_desc * this) {
-	lock( &this->lock );
-
-	LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
-	verifyf( this_thread() == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );
+void __leave_monitor_desc( monitor_desc * this ) {
+	lock( &this->lock, __PRETTY_FUNCTION__ );
+
+	// LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
+	verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );
 
 	//Leaving a recursion level, decrement the counter
…
 	unlock( &this->lock );
 
-	LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
+	// LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
 
 	//We need to wake-up the thread
-	ScheduleThread( new_owner );
+	WakeThread( new_owner );
+}
+
+void __leave_thread_monitor( thread_desc * thrd ) {
+	monitor_desc * this = &thrd->mon;
+	lock( &this->lock, __PRETTY_FUNCTION__ );
+
+	disable_interrupts();
+
+	thrd->cor.state = Halted;
+
+	verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
+
+	//Leaving a recursion level, decrement the counter
+	this->recursion -= 1;
+
+	//If we haven't left the last level of recursion
+	//it means we don't need to do anything
+	if( this->recursion != 0) {
+		unlock( &this->lock );
+		return;
+	}
+
+	thread_desc * new_owner = next_thread( this );
+
+	//We can now let other threads in safely
+	unlock( &this->lock );
+
+	//We need to wake-up the thread
+	if( new_owner) ScheduleThread( new_owner );
 }
 }
…
 	enter( this->m, this->count );
 
-	this->prev_mntrs = this_thread()->current_monitors;
-	this->prev_count = this_thread()->current_monitor_count;
-
-	this_thread()->current_monitors = m;
-	this_thread()->current_monitor_count = count;
+	this->prev_mntrs = this_thread->current_monitors;
+	this->prev_count = this_thread->current_monitor_count;
+
+	this_thread->current_monitors = m;
+	this_thread->current_monitor_count = count;
 }
 
…
 	leave( this->m, this->count );
 
-	this_thread()->current_monitors = this->prev_mntrs;
-	this_thread()->current_monitor_count = this->prev_count;
+	this_thread->current_monitors = this->prev_mntrs;
+	this_thread->current_monitor_count = this->prev_count;
 }
 
…
 	LIB_DEBUG_PRINT_SAFE("count %i\n", count);
 
-	__condition_node_t waiter = { this_thread(), count, user_info };
+	__condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };
 
 	__condition_criterion_t criteria[count];
…
 	//Some more checking in debug
 	LIB_DEBUG_DO(
-		thread_desc * this_thrd = this_thread();
+		thread_desc * this_thrd = this_thread;
 		if ( this->monitor_count != this_thrd->current_monitor_count ) {
 			abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
…
 
 	//create creteria
-	__condition_node_t waiter = { this_thread(), count, 0 };
+	__condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };
 
 	__condition_criterion_t criteria[count];
…
 // Internal scheduling
 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
-	// thread_desc * this = this_thread();
+	// thread_desc * this = this_thread;
 
 	// unsigned short count = this->current_monitor_count;
…
 static inline void lock_all( spinlock ** locks, unsigned short count ) {
 	for( int i = 0; i < count; i++ ) {
-		lock( locks[i] );
+		lock( locks[i], __PRETTY_FUNCTION__ );
 	}
 }
…
 	for( int i = 0; i < count; i++ ) {
 		spinlock * l = &source[i]->lock;
-		lock( l );
+		lock( l, __PRETTY_FUNCTION__ );
 		if(locks) locks[i] = l;
 	}
…
 
 static inline void brand_condition( condition * this ) {
-	thread_desc * thrd = this_thread();
+	thread_desc * thrd = this_thread;
 	if( !this->monitors ) {
 		LIB_DEBUG_PRINT_SAFE("Branding\n");
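The new __leave_thread_monitor packs the three steps of thread termination, which CtxInvokeThread previously split across two calls, into one critical section: the thread is marked Halted and interrupts are disabled while the monitor lock is still held, and only then is the next owner scheduled. Done out of order, another processor could schedule the dying thread while it is still running on its own stack. A condensed view of the required ordering, with the step numbers from the invoke.c comment (not compilable on its own; all lines come from the diff above):

void __leave_thread_monitor( thread_desc * thrd ) {
	monitor_desc * this = &thrd->mon;
	lock( &this->lock, __PRETTY_FUNCTION__ );
	disable_interrupts();                    // 3 - no preemption from here on
	thrd->cor.state = Halted;                // 1 - mark halted under the lock
	this->recursion -= 1;                    //     (outer-level check elided)
	thread_desc * new_owner = next_thread( this );
	unlock( &this->lock );                   // 2 - monitor released only after the
	                                         //     state is Halted, so no waiter
	                                         //     can observe a live owner
	if( new_owner ) ScheduleThread( new_owner );
}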
src/libcfa/concurrency/preemption.c
 extern "C" {
 	#include <errno.h>
+	#include <execinfo.h>
 	#define __USE_GNU
 	#include <signal.h>
…
 }
 
+
+#ifdef __USE_STREAM__
+#include "fstream"
+#endif
 #include "libhdr.h"
 
…
 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
 void sigHandler_alarm    ( __CFA_SIGPARMS__ );
+void sigHandler_segv     ( __CFA_SIGPARMS__ );
+void sigHandler_abort    ( __CFA_SIGPARMS__ );
 
 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
…
 	__kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
 	__kernel_sigaction( SIGALRM, sigHandler_alarm    , SA_SIGINFO );
+	__kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );
+	__kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );
+	// __kernel_sigaction( SIGABRT, sigHandler_abort    , SA_SIGINFO );
 }
 
…
 	LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
 
-	assert( !systemProcessor->alarms.head );
-	assert( systemProcessor->alarms.tail == &systemProcessor->alarms.head );
+	// assert( !systemProcessor->alarms.head );
+	// assert( systemProcessor->alarms.tail == &systemProcessor->alarms.head );
 }
 
…
 
 void tick_preemption() {
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Ticking preemption\n" );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	// LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption\n" );
 
 	alarm_list_t * alarms = &systemProcessor->alarms;
…
 	while( alarms->head && alarms->head->alarm < currtime ) {
 		alarm_node_t * node = pop(alarms);
-		LIB_DEBUG_DO(
-			len = snprintf( text, 256, "Ticking %p\n", node );
-			LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-		);
+		// LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
+
 		if( node->kernel_alarm ) {
 			preempt( node->proc );
…
 		}
 
-		LIB_DEBUG_DO( assert( validate( alarms ) ) );
+		verify( validate( alarms ) );
 
 		if( node->period > 0 ) {
…
 
 	verify( validate( alarms ) );
-	LIB_DEBUG_DO(
-		len = snprintf( text, 256, "Ticking preemption done\n" );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	// LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
 }
 
 void update_preemption( processor * this, __cfa_time_t duration ) {
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Processor : %p updating preemption to %lu\n", this, duration );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %lu\n", this, duration );
 
 	alarm_node_t * alarm = this->preemption_alarm;
…
 
 void enable_interrupts( const char * func ) {
+	processor   * proc = this_processor;
+	thread_desc * thrd = this_thread;
 	unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
 	verify( prev != (unsigned short) 0 );
-	if( prev == 1 && this_processor->pending_preemption ) {
-		this_processor->pending_preemption = false;
-		LIB_DEBUG_DO(
-			char text[256];
-			__attribute__((unused)) int len = snprintf( text, 256, "Executing deferred CtxSwitch on %p\n", this_processor );
-			LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-		);
-		BlockInternal( this_processor->current_thread );
-	}
-
-	this_processor->last_enable = func;
-}
-
-static inline void signal_unblock( bool alarm ) {
+	if( prev == 1 && proc->pending_preemption ) {
+		proc->pending_preemption = false;
+		LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Executing deferred CtxSwitch on %p\n", this_processor );
+		BlockInternal( thrd );
+		LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Executing deferred back\n" );
+	}
+
+	proc->last_enable = func;
+}
+
+static inline void signal_unblock( int sig ) {
+	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p unblocking sig %i\n", this_processor, sig );
+
 	sigset_t mask;
 	sigemptyset( &mask );
-	sigaddset( &mask, SIGUSR1 );
-
-	if( alarm ) sigaddset( &mask, SIGALRM );
+	sigaddset( &mask, sig );
 
 	if ( sigprocmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
…
 }
 
+extern "C" {
+	__attribute__((noinline)) void __debug_break() {
+		pthread_kill( pthread_self(), SIGTRAP );
+	}
+}
+
 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
-
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Ctx Switch IRH %p\n", (void *)(cxt->uc_mcontext.gregs[REG_RIP]));
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
-
-	signal_unblock( false );
+	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ctx Switch IRH %p running %p @ %p\n", this_processor, this_thread, (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
+
 	if( preemption_ready() ) {
-		LIB_DEBUG_DO(
-			len = snprintf( text, 256, "Ctx Switch IRH : Blocking thread %p on %p\n", this_processor->current_thread, this_processor );
-			LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-		);
-		BlockInternal( this_processor->current_thread );
+		LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Blocking thread %p on %p\n", this_thread, this_processor );
+		signal_unblock( SIGUSR1 );
+		BlockInternal( (thread_desc*)this_thread );
+		LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Back\n\n");
 	}
 	else {
-		LIB_DEBUG_DO(
-			len = snprintf( text, 256, "Ctx Switch IRH : Defering\n" );
-			LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-		);
+		LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Defering\n" );
 		defer_ctxSwitch();
+		signal_unblock( SIGUSR1 );
 	}
 }
 
 void sigHandler_alarm( __CFA_SIGPARMS__ ) {
-
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "\nAlarm IRH %p\n", (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
-
-	signal_unblock( true );
-	if( try_lock( &systemProcessor->alarm_lock ) ) {
+	LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "\nAlarm IRH %p running %p @ %p\n", this_processor, this_thread, (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
+
+	// if( ((intptr_t)cxt->uc_mcontext.gregs[REG_RIP]) > 0xFFFFFF ) __debug_break();
+
+	if( try_lock( &systemProcessor->alarm_lock, __PRETTY_FUNCTION__ ) ) {
 		tick_preemption();
 		unlock( &systemProcessor->alarm_lock );
…
 	}
 
+	signal_unblock( SIGALRM );
+
 	if( preemption_ready() && this_processor->pending_preemption ) {
-		LIB_DEBUG_DO(
-			len = snprintf( text, 256, "Alarm IRH : Blocking thread\n" );
-			LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-		);
+		LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Alarm IRH : Blocking thread %p on %p\n", this_thread, this_processor );
 		this_processor->pending_preemption = false;
-		BlockInternal( this_processor->current_thread );
+		BlockInternal( (thread_desc*)this_thread );
+		LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Alarm Switch IRH : Back\n\n");
 	}
 }
 
 static void preempt( processor * this ) {
-	LIB_DEBUG_DO(
-		char text[256];
-		__attribute__((unused)) int len = snprintf( text, 256, "Processor : signalling %p\n", this );
-		LIB_DEBUG_WRITE( STDERR_FILENO, text, len );
-	);
+	// LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : signalling %p\n", this );
 
 	if( this != systemProcessor ) {
…
 
 	act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler;
+	act.sa_flags = flags;
+
+	// disabled during signal handler
 	sigemptyset( &act.sa_mask );
-	sigaddset( &act.sa_mask, SIGALRM );		// disabled during signal handler
-	sigaddset( &act.sa_mask, SIGUSR1 );
-
-	act.sa_flags = flags;
+	sigaddset( &act.sa_mask, sig );
 
 	if ( sigaction( sig, &act, NULL ) == -1 ) {
-		// THE KERNEL IS NOT STARTED SO CALL NO uC++ ROUTINES!
-		char helpText[256];
-		__attribute__((unused)) int len = snprintf( helpText, 256, " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
-			sig, handler, flags, errno, strerror( errno ) );
-		LIB_DEBUG_WRITE( STDERR_FILENO, helpText, len );
+		LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
+			" __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
+			sig, handler, flags, errno, strerror( errno )
+		);
 		_exit( EXIT_FAILURE );
-	} // if
-}
+	}
+}
+
+typedef void (*sa_handler_t)(int);
+
+static void __kernel_sigdefault( int sig ) {
+	struct sigaction act;
+
+	// act.sa_handler = SIG_DFL;
+	act.sa_flags = 0;
+	sigemptyset( &act.sa_mask );
+
+	if ( sigaction( sig, &act, NULL ) == -1 ) {
+		LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
+			" __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",
+			sig, errno, strerror( errno )
+		);
+		_exit( EXIT_FAILURE );
+	}
+}
+
+//=============================================================================================
+// Terminating Signals logic
+//=============================================================================================
+
+LIB_DEBUG_DO(
+	static void __kernel_backtrace( int start ) {
+		// skip first N stack frames
+
+		enum { Frames = 50 };
+		void * array[Frames];
+		int size = backtrace( array, Frames );
+		char ** messages = backtrace_symbols( array, size );
+
+		// find executable name
+		*index( messages[0], '(' ) = '\0';
+		#ifdef __USE_STREAM__
+			serr | "Stack back trace for:" | messages[0] | endl;
+		#else
+			fprintf( stderr, "Stack back trace for: %s\n", messages[0]);
+		#endif
+
+		// skip last 2 stack frames after main
+		for ( int i = start; i < size && messages != NULL; i += 1 ) {
+			char * name = NULL;
+			char * offset_begin = NULL;
+			char * offset_end = NULL;
+
+			for ( char *p = messages[i]; *p; ++p ) {
+				// find parantheses and +offset
+				if ( *p == '(' ) {
+					name = p;
+				}
+				else if ( *p == '+' ) {
+					offset_begin = p;
+				}
+				else if ( *p == ')' ) {
+					offset_end = p;
+					break;
+				}
+			}
+
+			// if line contains symbol print it
+			int frameNo = i - start;
+			if ( name && offset_begin && offset_end && name < offset_begin ) {
+				// delimit strings
+				*name++ = '\0';
+				*offset_begin++ = '\0';
+				*offset_end++ = '\0';
+
+				#ifdef __USE_STREAM__
+					serr | "(" | frameNo | ")" | messages[i] | ":"
+					     | name | "+" | offset_begin | offset_end | endl;
+				#else
+					fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);
+				#endif
+			}
+			// otherwise, print the whole line
+			else {
+				#ifdef __USE_STREAM__
+					serr | "(" | frameNo | ")" | messages[i] | endl;
+				#else
+					fprintf( stderr, "(%i) %s\n", frameNo, messages[i] );
+				#endif
+			}
+		}
+
+		free( messages );
+	}
+)
+
+void sigHandler_segv( __CFA_SIGPARMS__ ) {
+	LIB_DEBUG_DO(
+		#ifdef __USE_STREAM__
+			serr | "*CFA runtime error* program cfa-cpp terminated with"
+			     | (sig == SIGSEGV ? "segment fault." : "bus error.")
+			     | endl;
+		#else
+			fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." );
+		#endif
+
+		// skip first 2 stack frames
+		__kernel_backtrace( 1 );
+	)
+	exit( EXIT_FAILURE );
+}
+
+// void sigHandler_abort( __CFA_SIGPARMS__ ) {
+// 	// skip first 6 stack frames
+// 	LIB_DEBUG_DO( __kernel_backtrace( 6 ); )
+
+// 	// reset default signal handler
+// 	__kernel_sigdefault( SIGABRT );
+
+// 	raise( SIGABRT );
+// }
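The backtrace support added above is built on glibc's execinfo API: backtrace() captures raw return addresses into an array, and backtrace_symbols() renders them as "binary(symbol+offset) [address]" strings in a single malloc'd block, which is exactly the format __kernel_backtrace parses for '(' , '+' and ')'. A minimal standalone demo of that API (compile with -rdynamic so function names survive linking):

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void where( void ) {
	void * frames[16];
	int n = backtrace( frames, 16 );                  // raw return addresses
	char ** names = backtrace_symbols( frames, n );   // one malloc'd block
	for ( int i = 0; i < n; i += 1 ) {
		fprintf( stderr, "(%d) %s\n", i, names[i] );
	}
	free( names );                                    // frees all strings at once
}

int main() {
	where();   // prints frames for where(), main(), and the libc start-up
	return 0;
}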
src/libcfa/concurrency/thread
 }
 
-thread_desc * this_thread(void);
+extern volatile thread_local thread_desc * this_thread;
 
 forall( dtype T | is_thread(T) )
src/libcfa/concurrency/thread.c
 	coroutine_desc* thrd_c = get_coroutine(this);
 	thread_desc*    thrd_h = get_thread   (this);
-	thrd_c->last = this_coroutine();
-	this_processor->current_coroutine = thrd_c;
+	thrd_c->last = this_coroutine;
 
-	LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
+	// LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
 
+	disable_interrupts();
 	create_stack(&thrd_c->stack, thrd_c->stack.size);
+	this_coroutine = thrd_c;
 	CtxStart(this, CtxInvokeThread);
+	assert( thrd_c->last->stack.context );
 	CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );
 
 	ScheduleThread(thrd_h);
+	enable_interrupts( __PRETTY_FUNCTION__ );
 }
 
 void yield( void ) {
-	BlockInternal( this_processor->current_thread );
+	BlockInternal( (thread_desc *)this_thread );
 }
 
…
 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
 	// set state of current coroutine to inactive
-	src->state = Inactive;
+	src->state = src->state == Halted ? Halted : Inactive;
 	dst->state = Active;
 
…
 	// set new coroutine that the processor is executing
 	// and context switch to it
-	this_processor->current_coroutine = dst;
+	this_coroutine = dst;
+	assert( src->stack.context );
 	CtxSwitch( src->stack.context, dst->stack.context );
-	this_processor->current_coroutine = src;
+	this_coroutine = src;
 
 	// set state of new coroutine to active
-	dst->state = Inactive;
+	dst->state = dst->state == Halted ? Halted : Inactive;
 	src->state = Active;
 }
src/tests/sched-int-block.c
 
 //------------------------------------------------------------------------------
 void wait_op( global_data_t * mutex a, global_data_t * mutex b, unsigned i ) {
-	wait( &cond, (uintptr_t)this_thread() );
+	wait( &cond, (uintptr_t)this_thread );
 
 	yield( ((unsigned)rand48()) % 10 );
…
 	}
 
-	a->last_thread = b->last_thread = this_thread();
+	a->last_thread = b->last_thread = this_thread;
 
 	yield( ((unsigned)rand48()) % 10 );
…
 	yield( ((unsigned)rand48()) % 10 );
 
-	a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread();
+	a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread;
 
 	if( !is_empty( &cond ) ) {
…
 //------------------------------------------------------------------------------
 void barge_op( global_data_t * mutex a ) {
-	a->last_thread = this_thread();
+	a->last_thread = this_thread;
 }
 