Changes in src [653f2c7:d43cd01]

Files: 1 added, 17 edited
- benchmark/CorCtxSwitch.c (modified, 1 diff)
- benchmark/csv-data.c (modified, 1 diff)
- benchmark/interrupt_linux.c (added)
- libcfa/concurrency/alarm.c (modified, 8 diffs)
- libcfa/concurrency/coroutine (modified, 4 diffs)
- libcfa/concurrency/coroutine.c (modified, 2 diffs)
- libcfa/concurrency/invoke.c (modified, 5 diffs)
- libcfa/concurrency/invoke.h (modified, 2 diffs)
- libcfa/concurrency/kernel (modified, 3 diffs)
- libcfa/concurrency/kernel.c (modified, 33 diffs)
- libcfa/concurrency/kernel_private.h (modified, 2 diffs)
- libcfa/concurrency/monitor (modified, 1 diff)
- libcfa/concurrency/monitor.c (modified, 16 diffs)
- libcfa/concurrency/preemption.c (modified, 9 diffs)
- libcfa/concurrency/thread (modified, 1 diff)
- libcfa/concurrency/thread.c (modified, 4 diffs)
- libcfa/libhdr/libdebug.h (modified, 2 diffs)
- tests/sched-int-block.c (modified, 4 diffs)
Legend: in the diffs below, lines beginning with '-' were removed, lines beginning with '+' were added, and unprefixed lines are unchanged context. Hunks are headed '@@ -<old start line> +<new start line> @@'. Hunks that only strip trailing whitespace are folded into context.
src/benchmark/CorCtxSwitch.c (r653f2c7 -> rd43cd01)

@@ -31 +31 @@

     StartTime = Time();
-    // for ( volatile unsigned int i = 0; i < NoOfTimes; i += 1 ) {
-    //     resume( this_coroutine() );
-    //     // resume( &s );
-    // }
     resumer( &s, NoOfTimes );
     EndTime = Time();
src/benchmark/csv-data.c (r653f2c7 -> rd43cd01)

@@ -38 +38 @@

     StartTime = Time();
-    // for ( volatile unsigned int i = 0; i < NoOfTimes; i += 1 ) {
-    //     resume( this_coroutine() );
-    //     // resume( &s );
-    // }
     resumer( &s, NoOfTimes );
     EndTime = Time();
src/libcfa/concurrency/alarm.c (r653f2c7 -> rd43cd01)

@@ -16 +16 @@

 extern "C" {
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
 #include <time.h>
+#include <unistd.h>
 #include <sys/time.h>
 }
@@ -22 +26 @@
 #include "alarm.h"
 #include "kernel_private.h"
+#include "libhdr.h"
 #include "preemption.h"
@@ -31 +36 @@
     timespec curr;
     clock_gettime( CLOCK_REALTIME, &curr );
-    return ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
+    __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec;
+    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time );
+    return curr_time;
 }

 void __kernel_set_timer( __cfa_time_t alarm ) {
+    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm );
     itimerval val;
     val.it_value.tv_sec = alarm / TIMEGRAN;    // seconds
@@ -71 +79 @@
 }

+LIB_DEBUG_DO( bool validate( alarm_list_t * this ) {
+    alarm_node_t ** it = &this->head;
+    while( (*it) ) {
+        it = &(*it)->next;
+    }
+
+    return it == this->tail;
+})
+
 static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) {
-    assert( !n->next );
+    verify( !n->next );
     if( p == this->tail ) {
         this->tail = &n->next;
@@ -80 +97 @@
     }
     *p = n;
+
+    verify( validate( this ) );
 }
@@ -89 +108 @@

     insert_at( this, n, it );
+
+    verify( validate( this ) );
 }
@@ -100 +121 @@
         head->next = NULL;
     }
+    verify( validate( this ) );
     return head;
 }
@@ -105 +127 @@
 static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) {
     verify( it );
-    verify( (*it)->next == n );
+    verify( (*it) == n );

-    (*it)->next = n->next;
+    (*it) = n->next;
     if( !n->next ) {
         this->tail = it;
     }
     n->next = NULL;
+
+    verify( validate( this ) );
 }

 static inline void remove( alarm_list_t * this, alarm_node_t * n ) {
     alarm_node_t ** it = &this->head;
-    while( (*it) && (*it)->next != n ) {
+    while( (*it) && (*it) != n ) {
         it = &(*it)->next;
     }

+    verify( validate( this ) );
+
     if( *it ) { remove_at( this, n, it ); }
+
+    verify( validate( this ) );
 }

 void register_self( alarm_node_t * this ) {
     disable_interrupts();
-    assert( !systemProcessor->pending_alarm );
-    lock( &systemProcessor->alarm_lock );
+    verify( !systemProcessor->pending_alarm );
+    lock( &systemProcessor->alarm_lock, __PRETTY_FUNCTION__ );
     {
+        verify( validate( &systemProcessor->alarms ) );
+        bool first = !systemProcessor->alarms.head;
+
         insert( &systemProcessor->alarms, this );
         if( systemProcessor->pending_alarm ) {
             tick_preemption();
         }
+        if( first ) {
+            __kernel_set_timer( systemProcessor->alarms.head->alarm - __kernel_get_time() );
+        }
     }
     unlock( &systemProcessor->alarm_lock );
     this->set = true;
-    enable_interrupts();
+    enable_interrupts( __PRETTY_FUNCTION__ );
 }

 void unregister_self( alarm_node_t * this ) {
+    // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this );
     disable_interrupts();
-    lock( &systemProcessor->alarm_lock );
-    remove( &systemProcessor->alarms, this );
+    lock( &systemProcessor->alarm_lock, __PRETTY_FUNCTION__ );
+    {
+        verify( validate( &systemProcessor->alarms ) );
+        remove( &systemProcessor->alarms, this );
+    }
     unlock( &systemProcessor->alarm_lock );
     disable_interrupts();
     this->set = false;
+    // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this );
 }
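The new validate() guards the alarm list's representation invariant: tail must always point at the last next field reachable from head. A minimal, self-contained sketch of that intrusive-list shape and check (illustrative names, not the CFA runtime's types):

    #include <assert.h>
    #include <stddef.h>

    /* Intrusive singly-linked list where `tail` points at the last `next` slot. */
    typedef struct node { struct node * next; } node_t;
    typedef struct { node_t * head; node_t ** tail; } list_t;

    /* The invariant validate() checks: following every `next` link from `head`
       must end exactly at the slot `tail` points to. */
    static int validate( list_t * this ) {
        node_t ** it = &this->head;
        while ( *it ) it = &(*it)->next;
        return it == this->tail;
    }

    static void push_back( list_t * this, node_t * n ) {
        n->next = NULL;
        *this->tail = n;          /* link after the current last element  */
        this->tail = &n->next;    /* tail now points at the new last slot */
        assert( validate( this ) );
    }

    int main(void) {
        list_t l = { NULL, &l.head };
        node_t a = { NULL }, b = { NULL };
        push_back( &l, &a );
        push_back( &l, &b );
        return !validate( &l );
    }

This is also the bug the diff fixes in remove()/remove_at(): the old code compared (*it)->next against n even though *it is already the candidate node, so the new verify( validate( ... ) ) calls would have caught the corrupted tail immediately.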
src/libcfa/concurrency/coroutine (r653f2c7 -> rd43cd01)

@@ -63 +63 @@

 // Get current coroutine
-coroutine_desc * this_coroutine(void);
+extern volatile thread_local coroutine_desc * this_coroutine;

 // Private wrappers for context switch and stack creation
@@ -71 +71 @@
 // Suspend implementation inlined for performance
 static inline void suspend() {
-    coroutine_desc * src = this_coroutine();    // optimization
+    coroutine_desc * src = this_coroutine;      // optimization

     assertf( src->last != 0,
@@ -88 +88 @@
 forall(dtype T | is_coroutine(T))
 static inline void resume(T * cor) {
-    coroutine_desc * src = this_coroutine();    // optimization
+    coroutine_desc * src = this_coroutine;      // optimization
     coroutine_desc * dst = get_coroutine(cor);
@@ -112 +112 @@

 static inline void resume(coroutine_desc * dst) {
-    coroutine_desc * src = this_coroutine();    // optimization
+    coroutine_desc * src = this_coroutine;      // optimization

     // not resuming self ?
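The accessor this_coroutine() becomes a volatile thread_local pointer, so the inlined suspend()/resume() fast paths pay a single TLS load instead of a function call into kernel.c. A hypothetical standalone illustration of the pattern (C11 thread_local; all names here are stand-ins):

    #include <stdio.h>
    #include <threads.h>    /* C11 thread_local */

    typedef struct { const char * name; } coroutine_desc;

    /* Per-kernel-thread "current coroutine", published as a plain variable
       instead of being hidden behind an accessor function. */
    static thread_local coroutine_desc * this_coroutine = NULL;

    int main(void) {
        coroutine_desc main_cor = { "Main Thread" };
        this_coroutine = &main_cor;              /* the kernel sets it on context switch */
        printf( "%s\n", this_coroutine->name );  /* call sites read it directly */
        return 0;
    }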
src/libcfa/concurrency/coroutine.c (r653f2c7 -> rd43cd01)

@@ -32 +32 @@
 #include "invoke.h"

-extern thread_local processor * this_processor;
+extern volatile thread_local processor * this_processor;

 //-----------------------------------------------------------------------------
@@ -106 +106 @@

     // set state of current coroutine to inactive
-    src->state = Inactive;
+    src->state = src->state == Halted ? Halted : Inactive;

     // set new coroutine that task is executing
-    this_processor->current_coroutine = dst;
+    this_coroutine = dst;

     // context switch to specified coroutine
+    assert( src->stack.context );
     CtxSwitch( src->stack.context, dst->stack.context );
     // when CtxSwitch returns we are back in the src coroutine
src/libcfa/concurrency/invoke.c (r653f2c7 -> rd43cd01)

@@ -29 +29 @@

 extern void __suspend_internal(void);
-extern void __leave_monitor_desc( struct monitor_desc * this );
+extern void __leave_thread_monitor( struct thread_desc * this );
+extern void disable_interrupts();
+extern void enable_interrupts( const char * );

 void CtxInvokeCoroutine(
     void (*main)(void *),
     struct coroutine_desc *(*get_coroutine)(void *),
     void *this
 ) {
@@ -57 +59 @@
 void CtxInvokeThread(
     void (*dtor)(void *),
     void (*main)(void *),
     struct thread_desc *(*get_thread)(void *),
     void *this
 ) {
+    // First suspend, once the thread arrives here,
+    // the function pointer to main can be invalidated without risk
     __suspend_internal();

+    // Fetch the thread handle from the user defined thread structure
     struct thread_desc* thrd = get_thread( this );
-    struct coroutine_desc* cor = &thrd->cor;
-    struct monitor_desc* mon = &thrd->mon;
-    cor->state = Active;

-    // LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this);
+    // Officially start the thread by enabling preemption
+    enable_interrupts( __PRETTY_FUNCTION__ );
+
+    // Call the main of the thread
     main( this );

-    __leave_monitor_desc( mon );
+    // To exit a thread we must :
+    // 1 - Mark it as halted
+    // 2 - Leave its monitor
+    // 3 - Disable the interupts
+    // The order of these 3 operations is very important
+    __leave_thread_monitor( thrd );

     //Final suspend, should never return

(The remaining hunks, in CtxInvokeCoroutine's parameter list and in CtxStart's i386/x86_64 fake-stack setup, only strip trailing whitespace.)
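The new comment block in CtxInvokeThread stresses that the exit steps are order-sensitive. One plausible reading: the thread marks itself Halted before releasing its monitor so nothing can requeue a dying thread, and interrupts are disabled before the final suspend so no preemption fires on a stack that is about to go away. A toy model of that ordering using flags (stand-in names only; the real code does this under the monitor spinlock and ends with a context switch that never returns):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { Active, Inactive, Halted } state_t;

    typedef struct {
        state_t state;
        bool    holds_monitor;
        bool    preemption_enabled;
    } toy_thread;

    static void leave_thread( toy_thread * t ) {
        t->state = Halted;             /* 1 - halted first: never requeued        */
        t->holds_monitor = false;      /* 2 - leave monitor: joiners may proceed  */
        t->preemption_enabled = false; /* 3 - interrupts off until final switch   */
    }

    int main(void) {
        toy_thread t = { Active, true, true };
        leave_thread( &t );
        printf( "state=%d monitor=%d preempt=%d\n",
                (int)t.state, (int)t.holds_monitor, (int)t.preemption_enabled );
        return 0;
    }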
src/libcfa/concurrency/invoke.h (r653f2c7 -> rd43cd01)

@@ -31 +31 @@
 struct spinlock {
     volatile int lock;
+    #ifdef __CFA_DEBUG__
+    const char * prev;
+    #endif
 };
@@ -83 +86 @@
     struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
     struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
-    struct monitor_desc * stack_owner;        // if bulk acquiring was used we need to synchronize signals with an other monitor
     unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that
 };
src/libcfa/concurrency/kernel (r653f2c7 -> rd43cd01)

@@ -28 +28 @@
 //-----------------------------------------------------------------------------
 // Locks
-bool try_lock( spinlock * );
-void lock( spinlock * );
+bool try_lock( spinlock *
+#ifdef __CFA_DEBUG__
+    , const char * caller
+#endif
+);
+
+void lock( spinlock *
+#ifdef __CFA_DEBUG__
+    , const char * caller
+#endif
+);
+
 void unlock( spinlock * );
@@ -78 +88 @@
     struct processorCtx_t * runner;
     cluster * cltr;
-    coroutine_desc * current_coroutine;
-    thread_desc * current_thread;
     pthread_t kernel_thread;
@@ -90 +98 @@
     unsigned int preemption;

-    unsigned short disable_preempt_count;
+    bool pending_preemption;

-    bool pending_preemption;
+    char * last_enable;
 };
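lock() and try_lock() now take a caller string in debug builds, stored in the new spinlock.prev field, so a wedged lock can report its last acquirer. A self-contained sketch of this debug-only-parameter idiom (MY_DEBUG, LOCK, and my_lock are placeholders, not the CFA API):

    #include <stdio.h>

    struct spinlock {
        volatile int lock;
    #ifdef MY_DEBUG
        const char * prev;   /* last successful caller, inspected post-mortem */
    #endif
    };

    #ifdef MY_DEBUG
    static void my_lock( struct spinlock * this, const char * caller ) {
        while ( __sync_lock_test_and_set( &this->lock, 1 ) ) {}   /* spin */
        this->prev = caller;                                      /* record acquirer */
    }
    #define LOCK( l ) my_lock( l, __func__ )
    #else
    static void my_lock( struct spinlock * this ) {
        while ( __sync_lock_test_and_set( &this->lock, 1 ) ) {}
    }
    #define LOCK( l ) my_lock( l )
    #endif

    static void my_unlock( struct spinlock * this ) {
        __sync_lock_release( &this->lock );
    }

    int main(void) {
        struct spinlock l = { 0 };
        LOCK( &l );          /* in a MY_DEBUG build this records "main" */
        my_unlock( &l );
        return 0;
    }

The call sites pay nothing in release builds: the extra parameter only exists when the debug macro is defined, which is the same shape as the __CFA_DEBUG__ guards above.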
src/libcfa/concurrency/kernel.c (r653f2c7 -> rd43cd01)

@@ -59 +59 @@
 // Global state

-thread_local processor * this_processor;
-
-coroutine_desc * this_coroutine(void) {
-    return this_processor->current_coroutine;
-}
-
-thread_desc * this_thread(void) {
-    return this_processor->current_thread;
-}
+volatile thread_local processor * this_processor;
+volatile thread_local coroutine_desc * this_coroutine;
+volatile thread_local thread_desc * this_thread;
+volatile thread_local unsigned short disable_preempt_count = 1;

 //-----------------------------------------------------------------------------
 // Main thread construction
 struct current_stack_info_t {
     machine_context_t ctx;
     unsigned int size;    // size of stack
     void *base;           // base of stack
@@ -136 +131 @@
 void ?{}(processor * this, cluster * cltr) {
     this->cltr = cltr;
-    this->current_coroutine = NULL;
-    this->current_thread = NULL;
     (&this->terminated){};
     this->is_terminated = false;
     this->preemption_alarm = NULL;
     this->preemption = default_preemption();
-    this->disable_preempt_count = 1;    //Start with interrupts disabled
     this->pending_preemption = false;
@@ -150 +142 @@
 void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) {
     this->cltr = cltr;
-    this->current_coroutine = NULL;
-    this->current_thread = NULL;
     (&this->terminated){};
     this->is_terminated = false;
-    this->disable_preempt_count = 0;
+    this->preemption_alarm = NULL;
+    this->preemption = default_preemption();
     this->pending_preemption = false;
+    this->kernel_thread = pthread_self();

     this->runner = runner;
-    LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);
+    LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner);
     runner{ this };
 }
+
+LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )

 void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) {
@@ -168 +162 @@

     (&this->proc){ cltr, runner };
+
+    verify( validate( &this->alarms ) );
 }
@@ -203 +199 @@

     thread_desc * readyThread = NULL;
     for( unsigned int spin_count = 0; ! this->is_terminated; spin_count++ )
     {
         readyThread = nextThread( this->cltr );
@@ -209 +205 @@
         if(readyThread)
         {
+            verify( disable_preempt_count > 0 );
+
             runThread(this, readyThread);
+
+            verify( disable_preempt_count > 0 );

             //Some actions need to be taken from the kernel
@@ -237 +237 @@
     //Reset the terminating actions here
     this->finish.action_code = No_Action;

     //Update global state
-    this->current_thread = dst;
+    this_thread = dst;

     // Context Switch to the thread
@@ -289 +289 @@
     processor * proc = (processor *) arg;
     this_processor = proc;
+    this_coroutine = NULL;
+    this_thread = NULL;
+    disable_preempt_count = 1;
     // SKULLDUGGERY: We want to create a context for the processor coroutine
     // which is needed for the 2-step context switch. However, there is no reason
     // to waste the perfectly valid stack create by pthread.
     current_stack_info_t info;
     machine_context_t ctx;
@@ -300 +303 @@

     //Set global state
-    proc->current_coroutine = &proc->runner->__cor;
-    proc->current_thread = NULL;
+    this_coroutine = &proc->runner->__cor;
+    this_thread = NULL;

     //We now have a proper context from which to schedule threads
     LIB_DEBUG_PRINT_SAFE("Kernel : core %p created (%p, %p)\n", proc, proc->runner, &ctx);
@@ -322 +325 @@
 void start(processor * this) {
     LIB_DEBUG_PRINT_SAFE("Kernel : Starting core %p\n", this);

+    // SIGALRM must only be caught by the system processor
+    sigset_t old_mask;
+    bool is_system_proc = this_processor == &systemProcessor->proc;
+    if ( is_system_proc ) {
+        // Child kernel-thread inherits the signal mask from the parent kernel-thread. So one special case for the
+        // system processor creating the user processor => toggle the blocking SIGALRM on system processor, create user
+        // processor, and toggle back (below) previous signal mask of the system processor.
+
+        sigset_t new_mask;
+        sigemptyset( &new_mask );
+        sigemptyset( &old_mask );
+        sigaddset( &new_mask, SIGALRM );
+
+        if ( sigprocmask( SIG_BLOCK, &new_mask, &old_mask ) == -1 ) {
+            abortf( "internal error, sigprocmask" );
+        }
+
+        assert( ! sigismember( &old_mask, SIGALRM ) );
+    }
+
     pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );

-    LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
+    // Toggle back previous signal mask of system processor.
+    if ( is_system_proc ) {
+        if ( sigprocmask( SIG_SETMASK, &old_mask, NULL ) == -1 ) {
+            abortf( "internal error, sigprocmask" );
+        } // if
+    } // if
+
+    LIB_DEBUG_PRINT_SAFE("Kernel : core %p started\n", this);
 }
@@ -331 +361 @@
 // Scheduler routines
 void ScheduleThread( thread_desc * thrd ) {
-    if( !thrd ) return;
+    // if( !thrd ) return;
+    assert( thrd );
+    assert( thrd->cor.state != Halted );
+
+    verify( disable_preempt_count > 0 );

     verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );

-    lock( &systemProcessor->proc.cltr->lock );
+    lock( &systemProcessor->proc.cltr->lock, __PRETTY_FUNCTION__ );
     append( &systemProcessor->proc.cltr->ready_queue, thrd );
     unlock( &systemProcessor->proc.cltr->lock );
+
+    verify( disable_preempt_count > 0 );
 }

 thread_desc * nextThread(cluster * this) {
-    lock( &this->lock );
+    verify( disable_preempt_count > 0 );
+    lock( &this->lock, __PRETTY_FUNCTION__ );
     thread_desc * head = pop_head( &this->ready_queue );
     unlock( &this->lock );
+    verify( disable_preempt_count > 0 );
     return head;
 }

-void ScheduleInternal() {
+void BlockInternal() {
+    disable_interrupts();
+    verify( disable_preempt_count > 0 );
     suspend();
-}
-
-void ScheduleInternal( spinlock * lock ) {
+    verify( disable_preempt_count > 0 );
+    enable_interrupts( __PRETTY_FUNCTION__ );
+}
+
+void BlockInternal( spinlock * lock ) {
+    disable_interrupts();
     this_processor->finish.action_code = Release;
     this_processor->finish.lock = lock;
+
+    verify( disable_preempt_count > 0 );
     suspend();
-}
-
-void ScheduleInternal( thread_desc * thrd ) {
+    verify( disable_preempt_count > 0 );
+
+    enable_interrupts( __PRETTY_FUNCTION__ );
+}
+
+void BlockInternal( thread_desc * thrd ) {
+    disable_interrupts();
+    assert( thrd->cor.state != Halted );
     this_processor->finish.action_code = Schedule;
     this_processor->finish.thrd = thrd;
+
+    verify( disable_preempt_count > 0 );
     suspend();
-}
-
-void ScheduleInternal( spinlock * lock, thread_desc * thrd ) {
+    verify( disable_preempt_count > 0 );
+
+    enable_interrupts( __PRETTY_FUNCTION__ );
+}
+
+void BlockInternal( spinlock * lock, thread_desc * thrd ) {
+    disable_interrupts();
     this_processor->finish.action_code = Release_Schedule;
     this_processor->finish.lock = lock;
     this_processor->finish.thrd = thrd;
+
+    verify( disable_preempt_count > 0 );
     suspend();
-}
-
-void ScheduleInternal(spinlock ** locks, unsigned short count) {
+    verify( disable_preempt_count > 0 );
+
+    enable_interrupts( __PRETTY_FUNCTION__ );
+}
+
+void BlockInternal(spinlock ** locks, unsigned short count) {
+    disable_interrupts();
     this_processor->finish.action_code = Release_Multi;
     this_processor->finish.locks = locks;
     this_processor->finish.lock_count = count;
+
+    verify( disable_preempt_count > 0 );
     suspend();
-}
-
-void ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
+    verify( disable_preempt_count > 0 );
+
+    enable_interrupts( __PRETTY_FUNCTION__ );
+}
+
+void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) {
+    disable_interrupts();
     this_processor->finish.action_code = Release_Multi_Schedule;
     this_processor->finish.locks = locks;
@@ -383 +451 @@
     this_processor->finish.thrds = thrds;
     this_processor->finish.thrd_count = thrd_count;
+
+    verify( disable_preempt_count > 0 );
     suspend();
+    verify( disable_preempt_count > 0 );
+
+    enable_interrupts( __PRETTY_FUNCTION__ );
 }
@@ -403 +476 @@
     LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n");

-    // Enable preemption
-    kernel_start_preemption();
-
     // Initialize the system cluster
     systemCluster = (cluster *)&systemCluster_storage;
@@ -423 +493 @@
     //initialize the global state variables
     this_processor = &systemProcessor->proc;
-    this_processor->current_thread = mainThread;
-    this_processor->current_coroutine = &mainThread->cor;
+    this_thread = mainThread;
+    this_coroutine = &mainThread->cor;
+    disable_preempt_count = 1;
+
+    // Enable preemption
+    kernel_start_preemption();

     // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX
@@ -435 +509 @@
     // THE SYSTEM IS NOW COMPLETELY RUNNING
     LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n");
+
+    enable_interrupts( __PRETTY_FUNCTION__ );
 }

 void kernel_shutdown(void) {
     LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n");
+
+    disable_interrupts();

     // SKULLDUGGERY: Notify the systemProcessor it needs to terminates.
@@ -448 +526 @@
     // THE SYSTEM IS NOW COMPLETELY STOPPED

+    // Disable preemption
+    kernel_stop_preemption();
+
     // Destroy the system processor and its context in reverse order of construction
     // These were manually constructed so we need manually destroy them
@@ -467 +548 @@
     // abort cannot be recursively entered by the same or different processors because all signal handlers return when
     // the globalAbort flag is true.
-    lock( &kernel_abort_lock );
+    lock( &kernel_abort_lock, __PRETTY_FUNCTION__ );

     // first task to abort ?
@@ -482 +563 @@
     sigaddset( &mask, SIGUSR1 );    // block SIGUSR1 signals
     sigsuspend( &mask );            // block the processor to prevent further damage during abort
     _exit( EXIT_FAILURE );          // if processor unblocks before it is killed, terminate it
     }

-    return this_thread();
+    return this_thread;
 }
@@ -494 +575 @@
     __lib_debug_write( STDERR_FILENO, abort_text, len );

-    if ( thrd != this_coroutine() ) {
-        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine()->name, this_coroutine() );
+    if ( thrd != this_coroutine ) {
+        len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine );
         __lib_debug_write( STDERR_FILENO, abort_text, len );
     }
     else {
@@ -506 +587 @@
     void __lib_debug_acquire() {
-        lock(&kernel_debug_lock);
+        lock(&kernel_debug_lock, __PRETTY_FUNCTION__);
     }
@@ -527 +608 @@
-bool try_lock( spinlock * this ) {
-    return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
-}
-
-void lock( spinlock * this ) {
+bool try_lock( spinlock * this, const char * caller ) {
+    bool ret = this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0;
+    this->prev = caller;
+    return ret;
+}
+
+void lock( spinlock * this, const char * caller ) {
     for ( unsigned int i = 1;; i += 1 ) {
         if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break;
     }
+    this->prev = caller;
 }
@@ -548 +632 @@
 void wait( signal_once * this ) {
-    lock( &this->lock );
+    lock( &this->lock, __PRETTY_FUNCTION__ );
     if( !this->cond ) {
-        append( &this->blocked, this_thread() );
-        ScheduleInternal( &this->lock );
-        lock( &this->lock );
-    }
-    unlock( &this->lock );
+        append( &this->blocked, (thread_desc*)this_thread );
+        BlockInternal( &this->lock );
+    }
+    else {
+        unlock( &this->lock );
+    }
 }

 void signal( signal_once * this ) {
-    lock( &this->lock );
+    lock( &this->lock, __PRETTY_FUNCTION__ );
     {
         this->cond = true;

+        disable_interrupts();
         thread_desc * it;
         while( it = pop_head( &this->blocked) ) {
             ScheduleThread( it );
         }
+        enable_interrupts( __PRETTY_FUNCTION__ );
     }
     unlock( &this->lock );
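The start() change relies on a POSIX rule: a thread created by pthread_create inherits its creator's signal mask, so blocking SIGALRM across the create keeps timer signals deliverable only to the system processor's kernel thread. A minimal demonstration of that inheritance (using pthread_sigmask, the per-thread analogue of the sigprocmask call in the diff; compile with -pthread):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>

    static void * worker( void * arg ) {
        (void)arg;
        sigset_t mask;
        pthread_sigmask( SIG_SETMASK, NULL, &mask );   /* read inherited mask */
        printf( "worker: SIGALRM blocked? %d\n", sigismember( &mask, SIGALRM ) );
        return NULL;
    }

    int main(void) {
        sigset_t new_mask, old_mask;
        sigemptyset( &new_mask );
        sigaddset( &new_mask, SIGALRM );

        pthread_sigmask( SIG_BLOCK, &new_mask, &old_mask );   /* block before create */
        pthread_t t;
        pthread_create( &t, NULL, worker, NULL );             /* child inherits mask */
        pthread_sigmask( SIG_SETMASK, &old_mask, NULL );      /* restore creator mask */

        pthread_join( t, NULL );
        return 0;
    }

The worker prints 1: it keeps the blocked mask it was created with even after the creator restores its own.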
src/libcfa/concurrency/kernel_private.h (r653f2c7 -> rd43cd01)

@@ -27 +27 @@
 //-----------------------------------------------------------------------------
 // Scheduler
+
+extern "C" {
+    void disable_interrupts();
+    void enable_interrupts_noRF();
+    void enable_interrupts( const char * );
+}
+
 void ScheduleThread( thread_desc * );
+static inline void WakeThread( thread_desc * thrd ) {
+    if( !thrd ) return;
+
+    disable_interrupts();
+    ScheduleThread( thrd );
+    enable_interrupts( __PRETTY_FUNCTION__ );
+}
 thread_desc * nextThread(cluster * this);

-void ScheduleInternal(void);
-void ScheduleInternal(spinlock * lock);
-void ScheduleInternal(thread_desc * thrd);
-void ScheduleInternal(spinlock * lock, thread_desc * thrd);
-void ScheduleInternal(spinlock ** locks, unsigned short count);
-void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);
+void BlockInternal(void);
+void BlockInternal(spinlock * lock);
+void BlockInternal(thread_desc * thrd);
+void BlockInternal(spinlock * lock, thread_desc * thrd);
+void BlockInternal(spinlock ** locks, unsigned short count);
+void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count);

 //-----------------------------------------------------------------------------
@@ -60 +74 @@
 extern cluster * systemCluster;
 extern system_proc_t * systemProcessor;
-extern thread_local processor * this_processor;
-
-static inline void disable_interrupts() {
-    __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST );
-    assert( prev != (unsigned short) -1 );
-}
-
-static inline void enable_interrupts_noRF() {
-    __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
-    verify( prev != (unsigned short) 0 );
-}
-
-static inline void enable_interrupts() {
-    __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST );
-    verify( prev != (unsigned short) 0 );
-    if( prev == 1 && this_processor->pending_preemption ) {
-        ScheduleInternal( this_processor->current_thread );
-        this_processor->pending_preemption = false;
-    }
-}
+extern volatile thread_local processor * this_processor;
+extern volatile thread_local coroutine_desc * this_coroutine;
+extern volatile thread_local thread_desc * this_thread;
+extern volatile thread_local unsigned short disable_preempt_count;

 //-----------------------------------------------------------------------------
src/libcfa/concurrency/monitor (r653f2c7 -> rd43cd01)

@@ -26 +26 @@
 static inline void ?{}(monitor_desc * this) {
     this->owner = NULL;
-    this->stack_owner = NULL;
     this->recursion = 0;
 }
src/libcfa/concurrency/monitor.c (r653f2c7 -> rd43cd01)

@@ -45 +45 @@
 extern "C" {
-    void __enter_monitor_desc( monitor_desc * this) {
-        lock( &this->lock );
-        thread_desc * thrd = this_thread();
-
-        LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
+    void __enter_monitor_desc( monitor_desc * this ) {
+        lock( &this->lock, __PRETTY_FUNCTION__ );
+        thread_desc * thrd = this_thread;
+
+        // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);

         if( !this->owner ) {
@@ -62 +62 @@
             //Some one else has the monitor, wait in line for it
             append( &this->entry_queue, thrd );
-            LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
-            ScheduleInternal( &this->lock );
-
-            //ScheduleInternal will unlock spinlock, no need to unlock ourselves
+            // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);
+            BlockInternal( &this->lock );
+
+            //BlockInternal will unlock spinlock, no need to unlock ourselves
             return;
         }
@@ -75 +75 @@
     // leave pseudo code :
     // TODO
-    void __leave_monitor_desc( monitor_desc * this) {
-        lock( &this->lock );
-
-        LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);
-        verifyf( this_thread() == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );
+    void __leave_monitor_desc( monitor_desc * this ) {
+        lock( &this->lock, __PRETTY_FUNCTION__ );
+
+        // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). ", this_thread, this, this->owner, this->recursion);
+        verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion );

         //Leaving a recursion level, decrement the counter
@@ -96 +96 @@
         unlock( &this->lock );

-        LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);
+        // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);

         //We need to wake-up the thread
-        ScheduleThread( new_owner );
+        WakeThread( new_owner );
+    }
+
+    void __leave_thread_monitor( thread_desc * thrd ) {
+        monitor_desc * this = &thrd->mon;
+        lock( &this->lock, __PRETTY_FUNCTION__ );
+
+        disable_interrupts();
+
+        thrd->cor.state = Halted;
+
+        verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion );
+
+        //Leaving a recursion level, decrement the counter
+        this->recursion -= 1;
+
+        //If we haven't left the last level of recursion
+        //it means we don't need to do anything
+        if( this->recursion != 0) {
+            unlock( &this->lock );
+            return;
+        }
+
+        thread_desc * new_owner = next_thread( this );
+
+        //We can now let other threads in safely
+        unlock( &this->lock );
+
+        //We need to wake-up the thread
+        if( new_owner) ScheduleThread( new_owner );
     }
 }
@@ -121 +150 @@
     enter( this->m, this->count );

-    this->prev_mntrs = this_thread()->current_monitors;
-    this->prev_count = this_thread()->current_monitor_count;
-
-    this_thread()->current_monitors = m;
-    this_thread()->current_monitor_count = count;
+    this->prev_mntrs = this_thread->current_monitors;
+    this->prev_count = this_thread->current_monitor_count;
+
+    this_thread->current_monitors = m;
+    this_thread->current_monitor_count = count;
 }
@@ -131 +160 @@
     leave( this->m, this->count );

-    this_thread()->current_monitors = this->prev_mntrs;
-    this_thread()->current_monitor_count = this->prev_count;
+    this_thread->current_monitors = this->prev_mntrs;
+    this_thread->current_monitor_count = this->prev_count;
 }
@@ -170 +199 @@
     unsigned short count = this->monitor_count;
     unsigned int recursions[ count ];    //Save the current recursion levels to restore them later
-    spinlock *   locks     [ count ];    //We need to pass-in an array of locks to ScheduleInternal
+    spinlock *   locks     [ count ];    //We need to pass-in an array of locks to BlockInternal

     LIB_DEBUG_PRINT_SAFE("count %i\n", count);

-    __condition_node_t waiter = { this_thread(), count, user_info };
+    __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info };

     __condition_criterion_t criteria[count];
@@ -208 +237 @@

     // Everything is ready to go to sleep
-    ScheduleInternal( locks, count, threads, thread_count );
+    BlockInternal( locks, count, threads, thread_count );
@@ -234 +263 @@
     //Some more checking in debug
     LIB_DEBUG_DO(
-        thread_desc * this_thrd = this_thread();
+        thread_desc * this_thrd = this_thread;
         if ( this->monitor_count != this_thrd->current_monitor_count ) {
             abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count );
@@ -281 +310 @@
     unsigned short count = this->monitor_count;
     unsigned int recursions[ count ];    //Save the current recursion levels to restore them later
-    spinlock *   locks     [ count ];    //We need to pass-in an array of locks to ScheduleInternal
+    spinlock *   locks     [ count ];    //We need to pass-in an array of locks to BlockInternal

     lock_all( this->monitors, locks, count );

     //create creteria
-    __condition_node_t waiter = { this_thread(), count, 0 };
+    __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 };

     __condition_criterion_t criteria[count];
@@ -309 +338 @@

     //Everything is ready to go to sleep
-    ScheduleInternal( locks, count, &signallee, 1 );
+    BlockInternal( locks, count, &signallee, 1 );
@@ -335 +364 @@
 // Internal scheduling
 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) {
-    // thread_desc * this = this_thread();
+    // thread_desc * this = this_thread;

     // unsigned short count = this->current_monitor_count;
     // unsigned int recursions[ count ];    //Save the current recursion levels to restore them later
-    // spinlock *   locks     [ count ];    //We need to pass-in an array of locks to ScheduleInternal
+    // spinlock *   locks     [ count ];    //We need to pass-in an array of locks to BlockInternal

     // lock_all( this->current_monitors, locks, count );
@@ -349 +378 @@
     // // // Everything is ready to go to sleep
-    // // ScheduleInternal( locks, count, threads, thread_count );
+    // // BlockInternal( locks, count, threads, thread_count );
@@ -393 +422 @@
 static inline void lock_all( spinlock ** locks, unsigned short count ) {
     for( int i = 0; i < count; i++ ) {
-        lock( locks[i] );
+        lock( locks[i], __PRETTY_FUNCTION__ );
     }
 }
@@ -400 +429 @@
     for( int i = 0; i < count; i++ ) {
         spinlock * l = &source[i]->lock;
-        lock( l );
+        lock( l, __PRETTY_FUNCTION__ );
         if(locks) locks[i] = l;
     }
@@ -457 +486 @@

 static inline void brand_condition( condition * this ) {
-    thread_desc * thrd = this_thread();
+    thread_desc * thrd = this_thread;
     if( !this->monitors ) {
         LIB_DEBUG_PRINT_SAFE("Branding\n");
src/libcfa/concurrency/preemption.c (r653f2c7 -> rd43cd01)

@@ -17 +17 @@
 #include "preemption.h"

 extern "C" {
+#include <errno.h>
+#include <execinfo.h>
+#define __USE_GNU
 #include <signal.h>
+#undef __USE_GNU
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
 }

-#define __CFA_DEFAULT_PREEMPTION__ 10
+#ifdef __USE_STREAM__
+#include "fstream"
+#endif
+#include "libhdr.h"
+
+#define __CFA_DEFAULT_PREEMPTION__ 10000

 __attribute__((weak)) unsigned int default_preemption() {
@@ -27 +41 @@
 }

+#define __CFA_SIGCXT__ ucontext_t *
+#define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt
+
 static void preempt( processor * this );
 static void timeout( thread_desc * this );

+void sigHandler_ctxSwitch( __CFA_SIGPARMS__ );
+void sigHandler_alarm    ( __CFA_SIGPARMS__ );
+void sigHandler_segv     ( __CFA_SIGPARMS__ );
+void sigHandler_abort    ( __CFA_SIGPARMS__ );
+
+static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags );
+
 //=============================================================================================
 // Kernel Preemption logic
@@ -36 +60 @@
 void kernel_start_preemption() {
-
-}
+    LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n");
+    __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO );
+    __kernel_sigaction( SIGALRM, sigHandler_alarm    , SA_SIGINFO );
+    __kernel_sigaction( SIGSEGV, sigHandler_segv     , SA_SIGINFO );
+    __kernel_sigaction( SIGBUS , sigHandler_segv     , SA_SIGINFO );
+    // __kernel_sigaction( SIGABRT, sigHandler_abort   , SA_SIGINFO );
+}
+
+void kernel_stop_preemption() {
+    //Block all signals, we are no longer in a position to handle them
+    sigset_t mask;
+    sigfillset( &mask );
+    sigprocmask( SIG_BLOCK, &mask, NULL );
+    LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n");
+
+    // assert( !systemProcessor->alarms.head );
+    // assert( systemProcessor->alarms.tail == &systemProcessor->alarms.head );
+}
+
+LIB_DEBUG_DO( bool validate( alarm_list_t * this ); )

 void tick_preemption() {
+    // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption\n" );
+
     alarm_list_t * alarms = &systemProcessor->alarms;
     __cfa_time_t currtime = __kernel_get_time();
     while( alarms->head && alarms->head->alarm < currtime ) {
         alarm_node_t * node = pop(alarms);
+        // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node );
+
         if( node->kernel_alarm ) {
             preempt( node->proc );
@@ -50 +96 @@
         }

+        verify( validate( alarms ) );
+
         if( node->period > 0 ) {
-            node->alarm += node->period;
+            node->alarm = currtime + node->period;
             insert( alarms, node );
         }
@@ -62 +110 @@
         __kernel_set_timer( alarms->head->alarm - currtime );
     }
+
+    verify( validate( alarms ) );
+    // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" );
 }

 void update_preemption( processor * this, __cfa_time_t duration ) {
-    // assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 );
+    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %lu\n", this, duration );
+
     alarm_node_t * alarm = this->preemption_alarm;
+    duration *= 1000;

     // Alarms need to be enabled
@@ -97 +150 @@

 void ^?{}( preemption_scope * this ) {
+    disable_interrupts();
+
     update_preemption( this->proc, 0 );
 }
@@ -104 +159 @@
 //=============================================================================================

+extern "C" {
+    void disable_interrupts() {
+        __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST );
+        verify( new_val < (unsigned short)65_000 );
+        verify( new_val != (unsigned short) 0 );
+    }
+
+    void enable_interrupts_noRF() {
+        unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
+        verify( prev != (unsigned short) 0 );
+    }
+
+    void enable_interrupts( const char * func ) {
+        processor * proc = this_processor;
+        thread_desc * thrd = this_thread;
+        unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST );
+        verify( prev != (unsigned short) 0 );
+        if( prev == 1 && proc->pending_preemption ) {
+            proc->pending_preemption = false;
+            LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Executing deferred CtxSwitch on %p\n", this_processor );
+            BlockInternal( thrd );
+            LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Executing deferred back\n" );
+        }
+
+        proc->last_enable = func;
+    }
+}
+
+static inline void signal_unblock( int sig ) {
+    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p unblocking sig %i\n", this_processor, sig );
+
+    sigset_t mask;
+    sigemptyset( &mask );
+    sigaddset( &mask, sig );
+
+    if ( sigprocmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
+        abortf( "internal error, sigprocmask" );
+    } // if
+}
+
 static inline bool preemption_ready() {
-    return this_processor->disable_preempt_count == 0;
+    return disable_preempt_count == 0;
 }
@@ -116 +211 @@
 }

-void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) {
+extern "C" {
+    __attribute__((noinline)) void __debug_break() {
+        pthread_kill( pthread_self(), SIGTRAP );
+    }
+}
+
+void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) {
+    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ctx Switch IRH %p running %p @ %p\n", this_processor, this_thread, (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
+
     if( preemption_ready() ) {
-        ScheduleInternal( this_processor->current_thread );
+        LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Blocking thread %p on %p\n", this_thread, this_processor );
+        signal_unblock( SIGUSR1 );
+        BlockInternal( (thread_desc*)this_thread );
+        LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Back\n\n");
     }
     else {
+        LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ctx Switch IRH : Defering\n" );
         defer_ctxSwitch();
+        signal_unblock( SIGUSR1 );
     }
 }

-void sigHandler_alarm( __attribute__((unused)) int sig ) {
-    if( try_lock( &systemProcessor->alarm_lock ) ) {
+void sigHandler_alarm( __CFA_SIGPARMS__ ) {
+    LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "\nAlarm IRH %p running %p @ %p\n", this_processor, this_thread, (void *)(cxt->uc_mcontext.gregs[REG_RIP]) );
+
+    // if( ((intptr_t)cxt->uc_mcontext.gregs[REG_RIP]) > 0xFFFFFF ) __debug_break();
+
+    if( try_lock( &systemProcessor->alarm_lock, __PRETTY_FUNCTION__ ) ) {
         tick_preemption();
         unlock( &systemProcessor->alarm_lock );
     }
     else {
         defer_alarm();
     }
+
+    signal_unblock( SIGALRM );
+
+    if( preemption_ready() && this_processor->pending_preemption ) {
+        LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Alarm IRH : Blocking thread %p on %p\n", this_thread, this_processor );
+        this_processor->pending_preemption = false;
+        BlockInternal( (thread_desc*)this_thread );
+        LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Alarm Switch IRH : Back\n\n");
+    }
 }

 static void preempt( processor * this ) {
-    pthread_kill( this->kernel_thread, SIGUSR1 );
+    // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : signalling %p\n", this );
+
+    if( this != systemProcessor ) {
+        pthread_kill( this->kernel_thread, SIGUSR1 );
+    }
+    else {
+        defer_ctxSwitch();
+    }
 }

 static void timeout( thread_desc * this ) {
     //TODO : implement waking threads
 }
+
+static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) {
+    struct sigaction act;
+
+    act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler;
+    act.sa_flags = flags;
+
+    // disabled during signal handler
+    sigemptyset( &act.sa_mask );
+    sigaddset( &act.sa_mask, sig );
+
+    if ( sigaction( sig, &act, NULL ) == -1 ) {
+        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
+            " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n",
+            sig, handler, flags, errno, strerror( errno )
+        );
+        _exit( EXIT_FAILURE );
+    }
+}
+
+typedef void (*sa_handler_t)(int);
+
+static void __kernel_sigdefault( int sig ) {
+    struct sigaction act;
+
+    // act.sa_handler = SIG_DFL;
+    act.sa_flags = 0;
+    sigemptyset( &act.sa_mask );
+
+    if ( sigaction( sig, &act, NULL ) == -1 ) {
+        LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO,
+            " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n",
+            sig, errno, strerror( errno )
+        );
+        _exit( EXIT_FAILURE );
+    }
+}
+
+//=============================================================================================
+// Terminating Signals logic
+//=============================================================================================
+
+LIB_DEBUG_DO(
+    static void __kernel_backtrace( int start ) {
+        // skip first N stack frames
+
+        enum { Frames = 50 };
+        void * array[Frames];
+        int size = backtrace( array, Frames );
+        char ** messages = backtrace_symbols( array, size );
+
+        // find executable name
+        *index( messages[0], '(' ) = '\0';
+        #ifdef __USE_STREAM__
+        serr | "Stack back trace for:" | messages[0] | endl;
+        #else
+        fprintf( stderr, "Stack back trace for: %s\n", messages[0]);
+        #endif
+
+        // skip last 2 stack frames after main
+        for ( int i = start; i < size && messages != NULL; i += 1 ) {
+            char * name = NULL;
+            char * offset_begin = NULL;
+            char * offset_end = NULL;
+
+            for ( char *p = messages[i]; *p; ++p ) {
+                // find parantheses and +offset
+                if ( *p == '(' ) {
+                    name = p;
+                }
+                else if ( *p == '+' ) {
+                    offset_begin = p;
+                }
+                else if ( *p == ')' ) {
+                    offset_end = p;
+                    break;
+                }
+            }
+
+            // if line contains symbol print it
+            int frameNo = i - start;
+            if ( name && offset_begin && offset_end && name < offset_begin ) {
+                // delimit strings
+                *name++ = '\0';
+                *offset_begin++ = '\0';
+                *offset_end++ = '\0';
+
+                #ifdef __USE_STREAM__
+                serr | "(" | frameNo | ")" | messages[i] | ":"
+                    | name | "+" | offset_begin | offset_end | endl;
+                #else
+                fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end);
+                #endif
+            }
+            // otherwise, print the whole line
+            else {
+                #ifdef __USE_STREAM__
+                serr | "(" | frameNo | ")" | messages[i] | endl;
+                #else
+                fprintf( stderr, "(%i) %s\n", frameNo, messages[i] );
+                #endif
+            }
+        }
+
+        free( messages );
+    }
+)
+
+void sigHandler_segv( __CFA_SIGPARMS__ ) {
+    LIB_DEBUG_DO(
+        #ifdef __USE_STREAM__
+        serr | "*CFA runtime error* program cfa-cpp terminated with"
+            | (sig == SIGSEGV ? "segment fault." : "bus error.")
+            | endl;
+        #else
+        fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." );
+        #endif
+
+        // skip first 2 stack frames
+        __kernel_backtrace( 1 );
+    )
+    exit( EXIT_FAILURE );
+}
+
+// void sigHandler_abort( __CFA_SIGPARMS__ ) {
+//     // skip first 6 stack frames
+//     LIB_DEBUG_DO( __kernel_backtrace( 6 ); )
+
+//     // reset default signal handler
+//     __kernel_sigdefault( SIGABRT );
+
+//     raise( SIGABRT );
+// }
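The heart of the new preemption logic is the per-thread nesting counter: a signal may trigger a context switch only when disable_preempt_count is zero; otherwise the handler records pending_preemption and the outermost enable_interrupts() performs the deferred switch. A reduced, single-threaded sketch of that protocol (illustrative names; the real code blocks via BlockInternal rather than calling a function):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <threads.h>

    static thread_local unsigned short disable_count = 1;  /* starts disabled */
    static thread_local bool pending = false;

    static void deferred_yield(void) { printf( "deferred yield runs now\n" ); }

    static void disable_interrupts(void) {
        unsigned short new_val = __atomic_add_fetch( &disable_count, 1, __ATOMIC_SEQ_CST );
        assert( new_val != 0 );          /* counter must not wrap */
    }

    static void enable_interrupts(void) {
        unsigned short prev = __atomic_fetch_add( &disable_count, (unsigned short)-1, __ATOMIC_SEQ_CST );
        assert( prev != 0 );             /* must pair with a disable */
        if ( prev == 1 && pending ) {    /* outermost enable + deferred request */
            pending = false;
            deferred_yield();
        }
    }

    int main(void) {
        disable_interrupts();    /* nesting: 1 -> 2 */
        pending = true;          /* pretend a timer signal arrived while disabled */
        enable_interrupts();     /* 2 -> 1: still disabled, nothing happens */
        enable_interrupts();     /* 1 -> 0: runs the deferred yield */
        return 0;
    }

The counter lives in thread-local storage but is still updated atomically, since in the runtime the same thread's signal handlers race with the code they interrupt.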
src/libcfa/concurrency/thread (r653f2c7 -> rd43cd01)

@@ -54 +54 @@
 }

-thread_desc * this_thread(void);
+extern volatile thread_local thread_desc * this_thread;

 forall( dtype T | is_thread(T) )
src/libcfa/concurrency/thread.c (r653f2c7 -> rd43cd01)

@@ -28 +28 @@
 }

-extern thread_local processor * this_processor;
+extern volatile thread_local processor * this_processor;

 //-----------------------------------------------------------------------------
@@ -71 +71 @@
     coroutine_desc* thrd_c = get_coroutine(this);
     thread_desc*    thrd_h = get_thread   (this);
-    thrd_c->last = this_coroutine();
-    this_processor->current_coroutine = thrd_c;
+    thrd_c->last = this_coroutine;

-    LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);
+    // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);

+    disable_interrupts();
     create_stack(&thrd_c->stack, thrd_c->stack.size);
+    this_coroutine = thrd_c;
     CtxStart(this, CtxInvokeThread);
+    assert( thrd_c->last->stack.context );
     CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context );

     ScheduleThread(thrd_h);
+    enable_interrupts( __PRETTY_FUNCTION__ );
 }

 void yield( void ) {
-    ScheduleInternal( this_processor->current_thread );
+    BlockInternal( (thread_desc *)this_thread );
 }
@@ -95 +98 @@
 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
     // set state of current coroutine to inactive
-    src->state = Inactive;
+    src->state = src->state == Halted ? Halted : Inactive;
     dst->state = Active;
@@ -103 +106 @@
     // set new coroutine that the processor is executing
     // and context switch to it
-    this_processor->current_coroutine = dst;
+    this_coroutine = dst;
+    assert( src->stack.context );
     CtxSwitch( src->stack.context, dst->stack.context );
-    this_processor->current_coroutine = src;
+    this_coroutine = src;

     // set state of new coroutine to active
-    dst->state = Inactive;
+    dst->state = dst->state == Halted ? Halted : Inactive;
     src->state = Active;
 }
src/libcfa/libhdr/libdebug.h (r653f2c7 -> rd43cd01)

@@ -18 +18 @@

 #ifdef __CFA_DEBUG__
-    #define LIB_DEBUG_DO(x) x
-    #define LIB_NO_DEBUG_DO(x) ((void)0)
+    #define LIB_DEBUG_DO(...) __VA_ARGS__
+    #define LIB_NO_DEBUG_DO(...)
 #else
-    #define LIB_DEBUG_DO(x) ((void)0)
-    #define LIB_NO_DEBUG_DO(x) x
+    #define LIB_DEBUG_DO(...)
+    #define LIB_NO_DEBUG_DO(...) __VA_ARGS__
 #endif
@@ -51 +51 @@

 #ifdef __CFA_DEBUG_PRINT__
     #define LIB_DEBUG_WRITE( fd, buffer, len )    __lib_debug_write( fd, buffer, len )
     #define LIB_DEBUG_ACQUIRE()                   __lib_debug_acquire()
     #define LIB_DEBUG_RELEASE()                   __lib_debug_release()
     #define LIB_DEBUG_PRINT_SAFE(...)             __lib_debug_print_safe   (__VA_ARGS__)
     #define LIB_DEBUG_PRINT_NOLOCK(...)           __lib_debug_print_nolock (__VA_ARGS__)
     #define LIB_DEBUG_PRINT_BUFFER(...)           __lib_debug_print_buffer (__VA_ARGS__)
+    #define LIB_DEBUG_PRINT_BUFFER_DECL(fd, ...)  char text[256]; int len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len );
+    #define LIB_DEBUG_PRINT_BUFFER_LOCAL(fd, ...) len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len );
 #else
     #define LIB_DEBUG_WRITE(...)                  ((void)0)
     #define LIB_DEBUG_ACQUIRE()                   ((void)0)
     #define LIB_DEBUG_RELEASE()                   ((void)0)
     #define LIB_DEBUG_PRINT_SAFE(...)             ((void)0)
     #define LIB_DEBUG_PRINT_NOLOCK(...)           ((void)0)
     #define LIB_DEBUG_PRINT_BUFFER(...)           ((void)0)
+    #define LIB_DEBUG_PRINT_BUFFER_DECL(...)      ((void)0)
+    #define LIB_DEBUG_PRINT_BUFFER_LOCAL(...)     ((void)0)
 #endif
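The new LIB_DEBUG_PRINT_BUFFER_DECL/LOCAL pair lets one scope emit several buffered writes while declaring the buffer only once: DECL defines text and len, LOCAL reuses them. A hypothetical reimplementation showing the intended pairing (write(2) keeps it usable from signal handlers, unlike locked stdio):

    #include <stdio.h>
    #include <unistd.h>

    /* Stand-ins for __lib_debug_write and the DECL/LOCAL macro pair. */
    static void debug_write( int fd, const char * buf, int len ) {
        if ( len > 0 ) { ssize_t r = write( fd, buf, (size_t)len ); (void)r; }
    }

    #define DEBUG_PRINT_BUFFER_DECL(fd, ...) \
        char text[256]; int len = snprintf( text, 256, __VA_ARGS__ ); \
        debug_write( fd, text, len );
    #define DEBUG_PRINT_BUFFER_LOCAL(fd, ...) \
        len = snprintf( text, 256, __VA_ARGS__ ); \
        debug_write( fd, text, len );

    int main(void) {
        DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "enter: %d\n", 1 );  /* declares text/len */
        DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "leave: %d\n", 2 ); /* reuses them */
        return 0;
    }

The variadic LIB_DEBUG_DO(...) change above serves the same machinery: multi-statement debug bodies, like the validate() definitions, contain commas that a one-argument macro could not accept.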
src/tests/sched-int-block.c (r653f2c7 -> rd43cd01)

@@ -29 +29 @@
 //------------------------------------------------------------------------------
 void wait_op( global_data_t * mutex a, global_data_t * mutex b, unsigned i ) {
-    wait( &cond, (uintptr_t)this_thread() );
+    wait( &cond, (uintptr_t)this_thread );

     yield( ((unsigned)rand48()) % 10 );
@@ -38 +38 @@
     }

-    a->last_thread = b->last_thread = this_thread();
+    a->last_thread = b->last_thread = this_thread;

     yield( ((unsigned)rand48()) % 10 );
@@ -54 +54 @@
     yield( ((unsigned)rand48()) % 10 );

-    a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread();
+    a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread;

     if( !is_empty( &cond ) ) {
@@ -84 +84 @@
 //------------------------------------------------------------------------------
 void barge_op( global_data_t * mutex a ) {
-    a->last_thread = this_thread();
+    a->last_thread = this_thread;
 }