Changeset ef6851a
- Timestamp:
- Jul 12, 2017, 9:50:58 PM (6 years ago)
- Branches:
- aaron-thesis, arm-eh, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 1a6e855
- Parents:
- 8b47e50 (diff), acb89ed (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - Location:
- src
- Files:
-
- 5 added
- 30 edited
Legend:
- Unmodified
- Added
- Removed
-
src/ControlStruct/ExceptTranslate.cc
r8b47e50 ref6851a 10 10 // Created On : Wed Jun 14 16:49:00 2017 11 11 // Last Modified By : Andrew Beach 12 // Last Modified On : Tus Jul 11 16:33:00 201713 // Update Count : 212 // Last Modified On : Wed Jul 12 15:07:00 2017 13 // Update Count : 3 14 14 // 15 15 … … 593 593 594 594 PassVisitor<ExceptionMutatorCore> translator; 595 for ( Declaration * decl : translationUnit ) { 596 decl->acceptMutator( translator ); 597 } 595 mutateAll( translationUnit, translator ); 598 596 } 599 597 } -
src/benchmark/CorCtxSwitch.c
r8b47e50 ref6851a 31 31 32 32 StartTime = Time(); 33 // for ( volatile unsigned int i = 0; i < NoOfTimes; i += 1 ) {34 // resume( this_coroutine() );35 // // resume( &s );36 // }37 33 resumer( &s, NoOfTimes ); 38 34 EndTime = Time(); -
src/benchmark/bench.h
r8b47e50 ref6851a 26 26 #define N 10000000 27 27 #endif 28 29 unsigned int default_preemption() { 30 return 0; 31 } -
src/benchmark/csv-data.c
r8b47e50 ref6851a 25 25 } 26 26 27 #ifndef N28 #define N 10000000029 #endif30 31 27 //----------------------------------------------------------------------------- 32 28 // coroutine context switch … … 38 34 39 35 StartTime = Time(); 40 // for ( volatile unsigned int i = 0; i < NoOfTimes; i += 1 ) {41 // resume( this_coroutine() );42 // // resume( &s );43 // }44 36 resumer( &s, NoOfTimes ); 45 37 EndTime = Time(); … … 104 96 mon_t mon1; 105 97 106 condition cond1a; 98 condition cond1a; 107 99 condition cond1b; 108 100 … … 152 144 mon_t mon2; 153 145 154 condition cond2a; 146 condition cond2a; 155 147 condition cond2b; 156 148 -
src/libcfa/concurrency/alarm.c
r8b47e50 ref6851a 16 16 17 17 extern "C" { 18 #include <errno.h> 19 #include <stdio.h> 20 #include <string.h> 18 21 #include <time.h> 22 #include <unistd.h> 19 23 #include <sys/time.h> 20 24 } 25 26 #include "libhdr.h" 21 27 22 28 #include "alarm.h" … … 31 37 timespec curr; 32 38 clock_gettime( CLOCK_REALTIME, &curr ); 33 return ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec; 39 __cfa_time_t curr_time = ((__cfa_time_t)curr.tv_sec * TIMEGRAN) + curr.tv_nsec; 40 // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : current time is %lu\n", curr_time ); 41 return curr_time; 34 42 } 35 43 36 44 void __kernel_set_timer( __cfa_time_t alarm ) { 45 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : set timer to %lu\n", (__cfa_time_t)alarm ); 37 46 itimerval val; 38 47 val.it_value.tv_sec = alarm / TIMEGRAN; // seconds … … 71 80 } 72 81 82 LIB_DEBUG_DO( bool validate( alarm_list_t * this ) { 83 alarm_node_t ** it = &this->head; 84 while( (*it) ) { 85 it = &(*it)->next; 86 } 87 88 return it == this->tail; 89 }) 90 73 91 static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) { 74 assert( !n->next );92 verify( !n->next ); 75 93 if( p == this->tail ) { 76 94 this->tail = &n->next; … … 80 98 } 81 99 *p = n; 100 101 verify( validate( this ) ); 82 102 } 83 103 … … 89 109 90 110 insert_at( this, n, it ); 111 112 verify( validate( this ) ); 91 113 } 92 114 … … 100 122 head->next = NULL; 101 123 } 124 verify( validate( this ) ); 102 125 return head; 103 126 } … … 105 128 static inline void remove_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t it ) { 106 129 verify( it ); 107 verify( (*it) ->next== n );130 verify( (*it) == n ); 108 131 109 (*it) ->next= n->next;132 (*it) = n->next; 110 133 if( !n-> next ) { 111 134 this->tail = it; 112 135 } 113 136 n->next = NULL; 137 138 verify( validate( this ) ); 114 139 } 115 140 116 141 static inline void remove( alarm_list_t * this, alarm_node_t * n ) { 117 142 alarm_node_t ** it = 
&this->head; 118 while( (*it) && (*it) ->next!= n ) {143 while( (*it) && (*it) != n ) { 119 144 it = &(*it)->next; 120 145 } 121 146 147 verify( validate( this ) ); 148 122 149 if( *it ) { remove_at( this, n, it ); } 150 151 verify( validate( this ) ); 123 152 } 124 153 125 154 void register_self( alarm_node_t * this ) { 126 155 disable_interrupts(); 127 assert( !systemProcessor->pending_alarm );128 lock( &systemProcessor->alarm_lock );156 verify( !systemProcessor->pending_alarm ); 157 lock( &systemProcessor->alarm_lock DEBUG_CTX2 ); 129 158 { 159 verify( validate( &systemProcessor->alarms ) ); 160 bool first = !systemProcessor->alarms.head; 161 130 162 insert( &systemProcessor->alarms, this ); 131 163 if( systemProcessor->pending_alarm ) { 132 164 tick_preemption(); 133 165 } 166 if( first ) { 167 __kernel_set_timer( systemProcessor->alarms.head->alarm - __kernel_get_time() ); 168 } 134 169 } 135 170 unlock( &systemProcessor->alarm_lock ); 136 171 this->set = true; 137 enable_interrupts( );172 enable_interrupts( DEBUG_CTX ); 138 173 } 139 174 140 175 void unregister_self( alarm_node_t * this ) { 176 // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Kernel : unregister %p start\n", this ); 141 177 disable_interrupts(); 142 lock( &systemProcessor->alarm_lock ); 143 remove( &systemProcessor->alarms, this ); 178 lock( &systemProcessor->alarm_lock DEBUG_CTX2 ); 179 { 180 verify( validate( &systemProcessor->alarms ) ); 181 remove( &systemProcessor->alarms, this ); 182 } 144 183 unlock( &systemProcessor->alarm_lock ); 145 disable_interrupts();184 enable_interrupts( DEBUG_CTX ); 146 185 this->set = false; 186 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Kernel : unregister %p end\n", this ); 147 187 } -
src/libcfa/concurrency/coroutine
r8b47e50 ref6851a 63 63 64 64 // Get current coroutine 65 coroutine_desc * this_coroutine(void);65 extern volatile thread_local coroutine_desc * this_coroutine; 66 66 67 67 // Private wrappers for context switch and stack creation … … 71 71 // Suspend implementation inlined for performance 72 72 static inline void suspend() { 73 coroutine_desc * src = this_coroutine (); // optimization73 coroutine_desc * src = this_coroutine; // optimization 74 74 75 75 assertf( src->last != 0, … … 88 88 forall(dtype T | is_coroutine(T)) 89 89 static inline void resume(T * cor) { 90 coroutine_desc * src = this_coroutine (); // optimization90 coroutine_desc * src = this_coroutine; // optimization 91 91 coroutine_desc * dst = get_coroutine(cor); 92 92 … … 112 112 113 113 static inline void resume(coroutine_desc * dst) { 114 coroutine_desc * src = this_coroutine (); // optimization114 coroutine_desc * src = this_coroutine; // optimization 115 115 116 116 // not resuming self ? -
src/libcfa/concurrency/coroutine.c
r8b47e50 ref6851a 32 32 #include "invoke.h" 33 33 34 extern thread_local processor * this_processor;34 extern volatile thread_local processor * this_processor; 35 35 36 36 //----------------------------------------------------------------------------- … … 44 44 // Coroutine ctors and dtors 45 45 void ?{}(coStack_t* this) { 46 this->size = 10240; // size of stack46 this->size = 65000; // size of stack 47 47 this->storage = NULL; // pointer to stack 48 48 this->limit = NULL; // stack grows towards stack limit … … 50 50 this->context = NULL; // address of cfa_context_t 51 51 this->top = NULL; // address of top of storage 52 this->userStack = false; 52 this->userStack = false; 53 53 } 54 54 … … 106 106 107 107 // set state of current coroutine to inactive 108 src->state = Inactive;108 src->state = src->state == Halted ? Halted : Inactive; 109 109 110 110 // set new coroutine that task is executing 111 this_ processor->current_coroutine = dst;111 this_coroutine = dst; 112 112 113 113 // context switch to specified coroutine 114 assert( src->stack.context ); 114 115 CtxSwitch( src->stack.context, dst->stack.context ); 115 // when CtxSwitch returns we are back in the src coroutine 116 // when CtxSwitch returns we are back in the src coroutine 116 117 117 118 // set state of new coroutine to active … … 131 132 this->size = libCeiling( storageSize, 16 ); 132 133 // use malloc/memalign because "new" raises an exception for out-of-memory 133 134 134 135 // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment 135 136 LIB_DEBUG_DO( this->storage = memalign( pageSize, cxtSize + this->size + pageSize ) ); -
src/libcfa/concurrency/invoke.c
r8b47e50 ref6851a 29 29 30 30 extern void __suspend_internal(void); 31 extern void __leave_monitor_desc( struct monitor_desc * this ); 31 extern void __leave_thread_monitor( struct thread_desc * this ); 32 extern void disable_interrupts(); 33 extern void enable_interrupts( DEBUG_CTX_PARAM ); 32 34 33 35 void CtxInvokeCoroutine( 34 void (*main)(void *), 35 struct coroutine_desc *(*get_coroutine)(void *), 36 void (*main)(void *), 37 struct coroutine_desc *(*get_coroutine)(void *), 36 38 void *this 37 39 ) { … … 56 58 57 59 void CtxInvokeThread( 58 void (*dtor)(void *), 59 void (*main)(void *), 60 struct thread_desc *(*get_thread)(void *), 60 void (*dtor)(void *), 61 void (*main)(void *), 62 struct thread_desc *(*get_thread)(void *), 61 63 void *this 62 64 ) { 65 // First suspend, once the thread arrives here, 66 // the function pointer to main can be invalidated without risk 63 67 __suspend_internal(); 64 68 69 // Fetch the thread handle from the user defined thread structure 65 70 struct thread_desc* thrd = get_thread( this ); 66 struct coroutine_desc* cor = &thrd->cor;67 struct monitor_desc* mon = &thrd->mon;68 cor->state = Active;69 71 70 // LIB_DEBUG_PRINTF("Invoke Thread : invoking main %p (args %p)\n", main, this); 72 // Officially start the thread by enabling preemption 73 enable_interrupts( DEBUG_CTX ); 74 75 // Call the main of the thread 71 76 main( this ); 72 77 73 __leave_monitor_desc( mon ); 74 78 // To exit a thread we must : 79 // 1 - Mark it as halted 80 // 2 - Leave its monitor 81 // 3 - Disable the interupts 82 // 4 - Final suspend 83 // The order of these 4 operations is very important 75 84 //Final suspend, should never return 76 __ suspend_internal();85 __leave_thread_monitor( thrd ); 77 86 abortf("Resumed dead thread"); 78 87 } … … 80 89 81 90 void CtxStart( 82 void (*main)(void *), 83 struct coroutine_desc *(*get_coroutine)(void *), 84 void *this, 91 void (*main)(void *), 92 struct coroutine_desc *(*get_coroutine)(void *), 93 void *this, 85 94 
void (*invoke)(void *) 86 95 ) { … … 108 117 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->rturn = invoke; 109 118 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520 110 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 119 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 111 120 112 121 #elif defined( __x86_64__ ) … … 128 137 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fixedRegisters[1] = invoke; 129 138 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->mxcr = 0x1F80; //Vol. 2A 3-520 130 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 139 ((struct FakeStack *)(((struct machine_context_t *)stack->context)->SP))->fcw = 0x037F; //Vol. 1 8-7 131 140 #else 132 141 #error Only __i386__ and __x86_64__ is supported for threads in cfa -
src/libcfa/concurrency/invoke.h
r8b47e50 ref6851a 31 31 struct spinlock { 32 32 volatile int lock; 33 #ifdef __CFA_DEBUG__ 34 const char * prev_name; 35 void* prev_thrd; 36 #endif 33 37 }; 34 38 … … 83 87 struct __thread_queue_t entry_queue; // queue of threads that are blocked waiting for the monitor 84 88 struct __condition_stack_t signal_stack; // stack of conditions to run next once we exit the monitor 85 struct monitor_desc * stack_owner; // if bulk acquiring was used we need to synchronize signals with an other monitor86 89 unsigned int recursion; // monitor routines can be called recursively, we need to keep track of that 87 90 }; -
src/libcfa/concurrency/kernel
r8b47e50 ref6851a 28 28 //----------------------------------------------------------------------------- 29 29 // Locks 30 bool try_lock( spinlock * ); 31 void lock( spinlock * ); 32 void unlock( spinlock * ); 30 bool try_lock ( spinlock * DEBUG_CTX_PARAM2 ); 31 void lock ( spinlock * DEBUG_CTX_PARAM2 ); 32 void lock_yield( spinlock * DEBUG_CTX_PARAM2 ); 33 void unlock ( spinlock * ); 33 34 34 struct s ignal_once {35 volatile bool cond;36 struct spinlock lock;37 struct __thread_queue_t blocked;35 struct semaphore { 36 spinlock lock; 37 int count; 38 __thread_queue_t waiting; 38 39 }; 39 40 40 void ?{}(signal_once * this); 41 void ^?{}(signal_once * this); 41 void ?{}(semaphore * this, int count = 1); 42 void ^?{}(semaphore * this); 43 void P(semaphore * this); 44 void V(semaphore * this); 42 45 43 void wait( signal_once * );44 void signal( signal_once * );45 46 46 47 //----------------------------------------------------------------------------- … … 68 69 unsigned short thrd_count; 69 70 }; 70 static inline void ?{}(FinishAction * this) { 71 static inline void ?{}(FinishAction * this) { 71 72 this->action_code = No_Action; 72 73 this->thrd = NULL; … … 78 79 struct processorCtx_t * runner; 79 80 cluster * cltr; 80 coroutine_desc * current_coroutine;81 thread_desc * current_thread;82 81 pthread_t kernel_thread; 83 84 s ignal_once terminated;82 83 semaphore terminated; 85 84 volatile bool is_terminated; 86 85 … … 90 89 unsigned int preemption; 91 90 92 unsigned short disable_preempt_count;91 bool pending_preemption; 93 92 94 bool pending_preemption;93 char * last_enable; 95 94 }; 96 95 -
src/libcfa/concurrency/kernel.c
r8b47e50 ref6851a 15 15 // 16 16 17 #include "startup.h" 18 19 //Start and stop routine for the kernel, declared first to make sure they run first 20 void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 21 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 22 23 //Header 24 #include "kernel_private.h" 17 #include "libhdr.h" 25 18 26 19 //C Includes … … 35 28 36 29 //CFA Includes 37 #include " libhdr.h"30 #include "kernel_private.h" 38 31 #include "preemption.h" 32 #include "startup.h" 39 33 40 34 //Private includes … … 42 36 #include "invoke.h" 43 37 38 //Start and stop routine for the kernel, declared first to make sure they run first 39 void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 40 void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 41 44 42 //----------------------------------------------------------------------------- 45 43 // Kernel storage 46 #define KERNEL_STORAGE(T,X) static char X## _storage[sizeof(T)]44 #define KERNEL_STORAGE(T,X) static char X##Storage[sizeof(T)] 47 45 48 46 KERNEL_STORAGE(processorCtx_t, systemProcessorCtx); … … 50 48 KERNEL_STORAGE(system_proc_t, systemProcessor); 51 49 KERNEL_STORAGE(thread_desc, mainThread); 52 KERNEL_STORAGE(machine_context_t, mainThread _context);50 KERNEL_STORAGE(machine_context_t, mainThreadCtx); 53 51 54 52 cluster * systemCluster; … … 59 57 // Global state 60 58 61 thread_local processor * this_processor; 62 63 coroutine_desc * this_coroutine(void) { 64 return this_processor->current_coroutine; 65 } 66 67 thread_desc * this_thread(void) { 68 return this_processor->current_thread; 69 } 59 volatile thread_local processor * this_processor; 60 volatile thread_local coroutine_desc * this_coroutine; 61 volatile thread_local thread_desc * this_thread; 62 volatile thread_local unsigned short disable_preempt_count = 1; 70 63 71 64 
//----------------------------------------------------------------------------- … … 91 84 92 85 this->limit = (void *)(((intptr_t)this->base) - this->size); 93 this->context = &mainThread _context_storage;86 this->context = &mainThreadCtxStorage; 94 87 this->top = this->base; 95 88 } … … 136 129 void ?{}(processor * this, cluster * cltr) { 137 130 this->cltr = cltr; 138 this->current_coroutine = NULL; 139 this->current_thread = NULL; 140 (&this->terminated){}; 131 (&this->terminated){ 0 }; 141 132 this->is_terminated = false; 142 133 this->preemption_alarm = NULL; 143 134 this->preemption = default_preemption(); 144 this->disable_preempt_count = 1; //Start with interrupts disabled145 135 this->pending_preemption = false; 146 136 … … 150 140 void ?{}(processor * this, cluster * cltr, processorCtx_t * runner) { 151 141 this->cltr = cltr; 152 this->current_coroutine = NULL; 153 this->current_thread = NULL; 154 (&this->terminated){}; 142 (&this->terminated){ 0 }; 155 143 this->is_terminated = false; 156 this->disable_preempt_count = 0; 144 this->preemption_alarm = NULL; 145 this->preemption = default_preemption(); 157 146 this->pending_preemption = false; 147 this->kernel_thread = pthread_self(); 158 148 159 149 this->runner = runner; 160 LIB_DEBUG_PRINT_SAFE("Kernel : constructing processor context %p\n", runner);150 LIB_DEBUG_PRINT_SAFE("Kernel : constructing system processor context %p\n", runner); 161 151 runner{ this }; 162 152 } 153 154 LIB_DEBUG_DO( bool validate( alarm_list_t * this ); ) 163 155 164 156 void ?{}(system_proc_t * this, cluster * cltr, processorCtx_t * runner) { … … 168 160 169 161 (&this->proc){ cltr, runner }; 162 163 verify( validate( &this->alarms ) ); 170 164 } 171 165 … … 174 168 LIB_DEBUG_PRINT_SAFE("Kernel : core %p signaling termination\n", this); 175 169 this->is_terminated = true; 176 wait( &this->terminated ); 170 P( &this->terminated ); 171 pthread_join( this->kernel_thread, NULL ); 177 172 } 178 173 } … … 209 204 if(readyThread) 210 
205 { 206 verify( disable_preempt_count > 0 ); 207 211 208 runThread(this, readyThread); 209 210 verify( disable_preempt_count > 0 ); 212 211 213 212 //Some actions need to be taken from the kernel … … 225 224 } 226 225 227 signal( &this->terminated ); 226 V( &this->terminated ); 227 228 228 LIB_DEBUG_PRINT_SAFE("Kernel : core %p terminated\n", this); 229 229 } … … 239 239 240 240 //Update global state 241 this ->current_thread = dst;241 this_thread = dst; 242 242 243 243 // Context Switch to the thread … … 289 289 processor * proc = (processor *) arg; 290 290 this_processor = proc; 291 this_coroutine = NULL; 292 this_thread = NULL; 293 disable_preempt_count = 1; 291 294 // SKULLDUGGERY: We want to create a context for the processor coroutine 292 295 // which is needed for the 2-step context switch. However, there is no reason … … 300 303 301 304 //Set global state 302 proc->current_coroutine = &proc->runner->__cor;303 proc->current_thread = NULL;305 this_coroutine = &proc->runner->__cor; 306 this_thread = NULL; 304 307 305 308 //We now have a proper context from which to schedule threads … … 331 334 // Scheduler routines 332 335 void ScheduleThread( thread_desc * thrd ) { 333 if( !thrd ) return; 336 // if( !thrd ) return; 337 assert( thrd ); 338 assert( thrd->cor.state != Halted ); 339 340 verify( disable_preempt_count > 0 ); 334 341 335 342 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 336 343 337 lock( &systemProcessor->proc.cltr->lock );344 lock( &systemProcessor->proc.cltr->lock DEBUG_CTX2 ); 338 345 append( &systemProcessor->proc.cltr->ready_queue, thrd ); 339 346 unlock( &systemProcessor->proc.cltr->lock ); 347 348 verify( disable_preempt_count > 0 ); 340 349 } 341 350 342 351 thread_desc * nextThread(cluster * this) { 343 lock( &this->lock ); 352 verify( disable_preempt_count > 0 ); 353 lock( &this->lock DEBUG_CTX2 ); 344 354 thread_desc * head = pop_head( &this->ready_queue ); 345 355 unlock( &this->lock ); 356 verify( 
disable_preempt_count > 0 ); 346 357 return head; 347 358 } 348 359 349 void ScheduleInternal() { 360 void BlockInternal() { 361 disable_interrupts(); 362 verify( disable_preempt_count > 0 ); 350 363 suspend(); 351 } 352 353 void ScheduleInternal( spinlock * lock ) { 364 verify( disable_preempt_count > 0 ); 365 enable_interrupts( DEBUG_CTX ); 366 } 367 368 void BlockInternal( spinlock * lock ) { 369 disable_interrupts(); 354 370 this_processor->finish.action_code = Release; 355 371 this_processor->finish.lock = lock; 372 373 verify( disable_preempt_count > 0 ); 356 374 suspend(); 357 } 358 359 void ScheduleInternal( thread_desc * thrd ) { 375 verify( disable_preempt_count > 0 ); 376 377 enable_interrupts( DEBUG_CTX ); 378 } 379 380 void BlockInternal( thread_desc * thrd ) { 381 disable_interrupts(); 382 assert( thrd->cor.state != Halted ); 360 383 this_processor->finish.action_code = Schedule; 361 384 this_processor->finish.thrd = thrd; 385 386 verify( disable_preempt_count > 0 ); 362 387 suspend(); 363 } 364 365 void ScheduleInternal( spinlock * lock, thread_desc * thrd ) { 388 verify( disable_preempt_count > 0 ); 389 390 enable_interrupts( DEBUG_CTX ); 391 } 392 393 void BlockInternal( spinlock * lock, thread_desc * thrd ) { 394 disable_interrupts(); 366 395 this_processor->finish.action_code = Release_Schedule; 367 396 this_processor->finish.lock = lock; 368 397 this_processor->finish.thrd = thrd; 398 399 verify( disable_preempt_count > 0 ); 369 400 suspend(); 370 } 371 372 void ScheduleInternal(spinlock ** locks, unsigned short count) { 401 verify( disable_preempt_count > 0 ); 402 403 enable_interrupts( DEBUG_CTX ); 404 } 405 406 void BlockInternal(spinlock ** locks, unsigned short count) { 407 disable_interrupts(); 373 408 this_processor->finish.action_code = Release_Multi; 374 409 this_processor->finish.locks = locks; 375 410 this_processor->finish.lock_count = count; 411 412 verify( disable_preempt_count > 0 ); 376 413 suspend(); 377 } 378 379 void 
ScheduleInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) { 414 verify( disable_preempt_count > 0 ); 415 416 enable_interrupts( DEBUG_CTX ); 417 } 418 419 void BlockInternal(spinlock ** locks, unsigned short lock_count, thread_desc ** thrds, unsigned short thrd_count) { 420 disable_interrupts(); 380 421 this_processor->finish.action_code = Release_Multi_Schedule; 381 422 this_processor->finish.locks = locks; … … 383 424 this_processor->finish.thrds = thrds; 384 425 this_processor->finish.thrd_count = thrd_count; 426 427 verify( disable_preempt_count > 0 ); 428 suspend(); 429 verify( disable_preempt_count > 0 ); 430 431 enable_interrupts( DEBUG_CTX ); 432 } 433 434 void LeaveThread(spinlock * lock, thread_desc * thrd) { 435 verify( disable_preempt_count > 0 ); 436 this_processor->finish.action_code = thrd ? Release_Schedule : Release; 437 this_processor->finish.lock = lock; 438 this_processor->finish.thrd = thrd; 439 385 440 suspend(); 386 441 } … … 397 452 // SKULLDUGGERY: the mainThread steals the process main thread 398 453 // which will then be scheduled by the systemProcessor normally 399 mainThread = (thread_desc *)&mainThread _storage;454 mainThread = (thread_desc *)&mainThreadStorage; 400 455 current_stack_info_t info; 401 456 mainThread{ &info }; … … 403 458 LIB_DEBUG_PRINT_SAFE("Kernel : Main thread ready\n"); 404 459 405 // Enable preemption406 kernel_start_preemption();407 408 460 // Initialize the system cluster 409 systemCluster = (cluster *)&systemCluster _storage;461 systemCluster = (cluster *)&systemClusterStorage; 410 462 systemCluster{}; 411 463 … … 414 466 // Initialize the system processor and the system processor ctx 415 467 // (the coroutine that contains the processing control flow) 416 systemProcessor = (system_proc_t *)&systemProcessor _storage;417 systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtx _storage };468 systemProcessor = (system_proc_t 
*)&systemProcessorStorage; 469 systemProcessor{ systemCluster, (processorCtx_t *)&systemProcessorCtxStorage }; 418 470 419 471 // Add the main thread to the ready queue … … 423 475 //initialize the global state variables 424 476 this_processor = &systemProcessor->proc; 425 this_processor->current_thread = mainThread; 426 this_processor->current_coroutine = &mainThread->cor; 477 this_thread = mainThread; 478 this_coroutine = &mainThread->cor; 479 disable_preempt_count = 1; 480 481 // Enable preemption 482 kernel_start_preemption(); 427 483 428 484 // SKULLDUGGERY: Force a context switch to the system processor to set the main thread's context to the current UNIX … … 435 491 // THE SYSTEM IS NOW COMPLETELY RUNNING 436 492 LIB_DEBUG_PRINT_SAFE("Kernel : Started\n--------------------------------------------------\n\n"); 493 494 enable_interrupts( DEBUG_CTX ); 437 495 } 438 496 439 497 void kernel_shutdown(void) { 440 498 LIB_DEBUG_PRINT_SAFE("\n--------------------------------------------------\nKernel : Shutting down\n"); 499 500 disable_interrupts(); 441 501 442 502 // SKULLDUGGERY: Notify the systemProcessor it needs to terminates. … … 448 508 // THE SYSTEM IS NOW COMPLETELY STOPPED 449 509 510 // Disable preemption 511 kernel_stop_preemption(); 512 450 513 // Destroy the system processor and its context in reverse order of construction 451 514 // These were manually constructed so we need manually destroy them … … 467 530 // abort cannot be recursively entered by the same or different processors because all signal handlers return when 468 531 // the globalAbort flag is true. 469 lock( &kernel_abort_lock );532 lock( &kernel_abort_lock DEBUG_CTX2 ); 470 533 471 534 // first task to abort ? 
… … 485 548 } 486 549 487 return this_thread ();550 return this_thread; 488 551 } 489 552 … … 494 557 __lib_debug_write( STDERR_FILENO, abort_text, len ); 495 558 496 if ( thrd != this_coroutine ()) {497 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine ()->name, this_coroutine());559 if ( thrd != this_coroutine ) { 560 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", this_coroutine->name, this_coroutine ); 498 561 __lib_debug_write( STDERR_FILENO, abort_text, len ); 499 562 } … … 505 568 extern "C" { 506 569 void __lib_debug_acquire() { 507 lock( &kernel_debug_lock);570 lock( &kernel_debug_lock DEBUG_CTX2 ); 508 571 } 509 572 510 573 void __lib_debug_release() { 511 unlock( &kernel_debug_lock);574 unlock( &kernel_debug_lock ); 512 575 } 513 576 } … … 525 588 } 526 589 527 bool try_lock( spinlock * this ) {590 bool try_lock( spinlock * this DEBUG_CTX_PARAM2 ) { 528 591 return this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0; 529 592 } 530 593 531 void lock( spinlock * this ) {594 void lock( spinlock * this DEBUG_CTX_PARAM2 ) { 532 595 for ( unsigned int i = 1;; i += 1 ) { 533 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) break; 534 } 535 } 596 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; } 597 } 598 LIB_DEBUG_DO( 599 this->prev_name = caller; 600 this->prev_thrd = this_thread; 601 ) 602 } 603 604 void lock_yield( spinlock * this DEBUG_CTX_PARAM2 ) { 605 for ( unsigned int i = 1;; i += 1 ) { 606 if ( this->lock == 0 && __sync_lock_test_and_set_4( &this->lock, 1 ) == 0 ) { break; } 607 yield(); 608 } 609 LIB_DEBUG_DO( 610 this->prev_name = caller; 611 this->prev_thrd = this_thread; 612 ) 613 } 614 536 615 537 616 void unlock( spinlock * this ) { … … 539 618 } 540 619 541 void ?{}( signal_once * this ) { 542 this->cond = false; 543 } 544 void ^?{}( signal_once * this ) { 545 546 } 547 548 void wait( signal_once 
* this ) { 549 lock( &this->lock ); 550 if( !this->cond ) { 551 append( &this->blocked, this_thread() ); 552 ScheduleInternal( &this->lock ); 553 lock( &this->lock ); 554 } 620 void ?{}( semaphore * this, int count = 1 ) { 621 (&this->lock){}; 622 this->count = count; 623 (&this->waiting){}; 624 } 625 void ^?{}(semaphore * this) {} 626 627 void P(semaphore * this) { 628 lock( &this->lock DEBUG_CTX2 ); 629 this->count -= 1; 630 if ( this->count < 0 ) { 631 // queue current task 632 append( &this->waiting, (thread_desc *)this_thread ); 633 634 // atomically release spin lock and block 635 BlockInternal( &this->lock ); 636 } 637 else { 638 unlock( &this->lock ); 639 } 640 } 641 642 void V(semaphore * this) { 643 thread_desc * thrd = NULL; 644 lock( &this->lock DEBUG_CTX2 ); 645 this->count += 1; 646 if ( this->count <= 0 ) { 647 // remove task at head of waiting list 648 thrd = pop_head( &this->waiting ); 649 } 650 555 651 unlock( &this->lock ); 556 } 557 558 void signal( signal_once * this ) { 559 lock( &this->lock ); 560 { 561 this->cond = true; 562 563 thread_desc * it; 564 while( it = pop_head( &this->blocked) ) { 565 ScheduleThread( it ); 566 } 567 } 568 unlock( &this->lock ); 652 653 // make new owner 654 WakeThread( thrd ); 569 655 } 570 656 -
src/libcfa/concurrency/kernel_private.h
r8b47e50 ref6851a 18 18 #define KERNEL_PRIVATE_H 19 19 20 #include "libhdr.h" 21 20 22 #include "kernel" 21 23 #include "thread" … … 23 25 #include "alarm.h" 24 26 25 #include "libhdr.h"26 27 27 28 //----------------------------------------------------------------------------- 28 29 // Scheduler 30 31 extern "C" { 32 void disable_interrupts(); 33 void enable_interrupts_noRF(); 34 void enable_interrupts( DEBUG_CTX_PARAM ); 35 } 36 29 37 void ScheduleThread( thread_desc * ); 38 static inline void WakeThread( thread_desc * thrd ) { 39 if( !thrd ) return; 40 41 disable_interrupts(); 42 ScheduleThread( thrd ); 43 enable_interrupts( DEBUG_CTX ); 44 } 30 45 thread_desc * nextThread(cluster * this); 31 46 32 void ScheduleInternal(void); 33 void ScheduleInternal(spinlock * lock); 34 void ScheduleInternal(thread_desc * thrd); 35 void ScheduleInternal(spinlock * lock, thread_desc * thrd); 36 void ScheduleInternal(spinlock ** locks, unsigned short count); 37 void ScheduleInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count); 47 void BlockInternal(void); 48 void BlockInternal(spinlock * lock); 49 void BlockInternal(thread_desc * thrd); 50 void BlockInternal(spinlock * lock, thread_desc * thrd); 51 void BlockInternal(spinlock ** locks, unsigned short count); 52 void BlockInternal(spinlock ** locks, unsigned short count, thread_desc ** thrds, unsigned short thrd_count); 53 void LeaveThread(spinlock * lock, thread_desc * thrd); 38 54 39 55 //----------------------------------------------------------------------------- … … 60 76 extern cluster * systemCluster; 61 77 extern system_proc_t * systemProcessor; 62 extern thread_local processor * this_processor; 63 64 static inline void disable_interrupts() { 65 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, 1, __ATOMIC_SEQ_CST ); 66 assert( prev != (unsigned short) -1 ); 67 } 68 69 static inline void enable_interrupts_noRF() { 70 
__attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 71 verify( prev != (unsigned short) 0 ); 72 } 73 74 static inline void enable_interrupts() { 75 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &this_processor->disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 76 verify( prev != (unsigned short) 0 ); 77 if( prev == 1 && this_processor->pending_preemption ) { 78 ScheduleInternal( this_processor->current_thread ); 79 this_processor->pending_preemption = false; 80 } 81 } 78 extern volatile thread_local processor * this_processor; 79 extern volatile thread_local coroutine_desc * this_coroutine; 80 extern volatile thread_local thread_desc * this_thread; 81 extern volatile thread_local unsigned short disable_preempt_count; 82 82 83 83 //----------------------------------------------------------------------------- -
src/libcfa/concurrency/monitor
r8b47e50 ref6851a 26 26 static inline void ?{}(monitor_desc * this) { 27 27 this->owner = NULL; 28 this->stack_owner = NULL;29 28 this->recursion = 0; 30 29 } -
src/libcfa/concurrency/monitor.c
r8b47e50 ref6851a 19 19 #include <stdlib> 20 20 21 #include "libhdr.h" 21 22 #include "kernel_private.h" 22 #include "libhdr.h"23 23 24 24 //----------------------------------------------------------------------------- … … 44 44 45 45 extern "C" { 46 void __enter_monitor_desc( monitor_desc * this) {47 lock ( &this->lock);48 thread_desc * thrd = this_thread ();49 50 LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);46 void __enter_monitor_desc( monitor_desc * this ) { 47 lock_yield( &this->lock DEBUG_CTX2 ); 48 thread_desc * thrd = this_thread; 49 50 // LIB_DEBUG_PRINT_SAFE("%p Entering %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion); 51 51 52 52 if( !this->owner ) { … … 62 62 //Some one else has the monitor, wait in line for it 63 63 append( &this->entry_queue, thrd ); 64 LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd);65 ScheduleInternal( &this->lock );66 67 // ScheduleInternal will unlock spinlock, no need to unlock ourselves68 return; 64 // LIB_DEBUG_PRINT_SAFE("%p Blocking on entry\n", thrd); 65 BlockInternal( &this->lock ); 66 67 //BlockInternal will unlock spinlock, no need to unlock ourselves 68 return; 69 69 } 70 70 … … 75 75 // leave pseudo code : 76 76 // TODO 77 void __leave_monitor_desc( monitor_desc * this) {78 lock ( &this->lock);79 80 LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i)\n", thrd, this, this->owner, this->recursion);81 verifyf( this_thread () == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread(), this->owner, this->recursion );77 void __leave_monitor_desc( monitor_desc * this ) { 78 lock_yield( &this->lock DEBUG_CTX2 ); 79 80 // LIB_DEBUG_PRINT_SAFE("%p Leaving %p (o: %p, r: %i). 
", this_thread, this, this->owner, this->recursion); 81 verifyf( this_thread == this->owner, "Expected owner to be %p, got %p (r: %i)", this_thread, this->owner, this->recursion ); 82 82 83 83 //Leaving a recursion level, decrement the counter … … 96 96 unlock( &this->lock ); 97 97 98 LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner);98 // LIB_DEBUG_PRINT_SAFE("Next owner is %p\n", new_owner); 99 99 100 100 //We need to wake-up the thread 101 ScheduleThread( new_owner ); 101 WakeThread( new_owner ); 102 } 103 104 void __leave_thread_monitor( thread_desc * thrd ) { 105 monitor_desc * this = &thrd->mon; 106 lock_yield( &this->lock DEBUG_CTX2 ); 107 108 disable_interrupts(); 109 110 thrd->cor.state = Halted; 111 112 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i)", thrd, this->owner, this->recursion ); 113 114 //Leaving a recursion level, decrement the counter 115 this->recursion -= 1; 116 117 //If we haven't left the last level of recursion 118 //it means we don't need to do anything 119 if( this->recursion != 0) { 120 unlock( &this->lock ); 121 return; 122 } 123 124 thread_desc * new_owner = next_thread( this ); 125 126 LeaveThread( &this->lock, new_owner ); 102 127 } 103 128 } … … 121 146 enter( this->m, this->count ); 122 147 123 this->prev_mntrs = this_thread ()->current_monitors;124 this->prev_count = this_thread ()->current_monitor_count;125 126 this_thread ()->current_monitors = m;127 this_thread ()->current_monitor_count = count;148 this->prev_mntrs = this_thread->current_monitors; 149 this->prev_count = this_thread->current_monitor_count; 150 151 this_thread->current_monitors = m; 152 this_thread->current_monitor_count = count; 128 153 } 129 154 … … 131 156 leave( this->m, this->count ); 132 157 133 this_thread ()->current_monitors = this->prev_mntrs;134 this_thread ()->current_monitor_count = this->prev_count;158 this_thread->current_monitors = this->prev_mntrs; 159 this_thread->current_monitor_count = this->prev_count; 135 160 } 
136 161 … … 159 184 // Internal scheduling 160 185 void wait( condition * this, uintptr_t user_info = 0 ) { 161 LIB_DEBUG_PRINT_SAFE("Waiting\n");186 // LIB_DEBUG_PRINT_SAFE("Waiting\n"); 162 187 163 188 brand_condition( this ); … … 170 195 unsigned short count = this->monitor_count; 171 196 unsigned int recursions[ count ]; //Save the current recursion levels to restore them later 172 spinlock * locks [ count ]; //We need to pass-in an array of locks to ScheduleInternal173 174 LIB_DEBUG_PRINT_SAFE("count %i\n", count);175 176 __condition_node_t waiter = { this_thread(), count, user_info };197 spinlock * locks [ count ]; //We need to pass-in an array of locks to BlockInternal 198 199 // LIB_DEBUG_PRINT_SAFE("count %i\n", count); 200 201 __condition_node_t waiter = { (thread_desc*)this_thread, count, user_info }; 177 202 178 203 __condition_criterion_t criteria[count]; 179 204 for(int i = 0; i < count; i++) { 180 205 (&criteria[i]){ this->monitors[i], &waiter }; 181 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );206 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] ); 182 207 } 183 208 … … 201 226 } 202 227 203 LIB_DEBUG_PRINT_SAFE("Will unblock: ");228 // LIB_DEBUG_PRINT_SAFE("Will unblock: "); 204 229 for(int i = 0; i < thread_count; i++) { 205 LIB_DEBUG_PRINT_SAFE("%p ", threads[i]);206 } 207 LIB_DEBUG_PRINT_SAFE("\n");230 // LIB_DEBUG_PRINT_SAFE("%p ", threads[i]); 231 } 232 // LIB_DEBUG_PRINT_SAFE("\n"); 208 233 209 234 // Everything is ready to go to sleep 210 ScheduleInternal( locks, count, threads, thread_count );235 BlockInternal( locks, count, threads, thread_count ); 211 236 212 237 … … 222 247 bool signal( condition * this ) { 223 248 if( is_empty( this ) ) { 224 LIB_DEBUG_PRINT_SAFE("Nothing to signal\n");249 // LIB_DEBUG_PRINT_SAFE("Nothing to signal\n"); 225 250 return false; 226 251 } … … 231 256 232 257 unsigned short count = this->monitor_count; 233 258 234 259 //Some more checking in debug 235 260 LIB_DEBUG_DO( 236 thread_desc * 
this_thrd = this_thread ();261 thread_desc * this_thrd = this_thread; 237 262 if ( this->monitor_count != this_thrd->current_monitor_count ) { 238 263 abortf( "Signal on condition %p made with different number of monitor(s), expected %i got %i", this, this->monitor_count, this_thrd->current_monitor_count ); … … 248 273 //Lock all the monitors 249 274 lock_all( this->monitors, NULL, count ); 250 LIB_DEBUG_PRINT_SAFE("Signalling");275 // LIB_DEBUG_PRINT_SAFE("Signalling"); 251 276 252 277 //Pop the head of the waiting queue … … 256 281 for(int i = 0; i < count; i++) { 257 282 __condition_criterion_t * crit = &node->criteria[i]; 258 LIB_DEBUG_PRINT_SAFE(" %p", crit->target);283 // LIB_DEBUG_PRINT_SAFE(" %p", crit->target); 259 284 assert( !crit->ready ); 260 285 push( &crit->target->signal_stack, crit ); 261 286 } 262 287 263 LIB_DEBUG_PRINT_SAFE("\n");288 // LIB_DEBUG_PRINT_SAFE("\n"); 264 289 265 290 //Release … … 281 306 unsigned short count = this->monitor_count; 282 307 unsigned int recursions[ count ]; //Save the current recursion levels to restore them later 283 spinlock * locks [ count ]; //We need to pass-in an array of locks to ScheduleInternal308 spinlock * locks [ count ]; //We need to pass-in an array of locks to BlockInternal 284 309 285 310 lock_all( this->monitors, locks, count ); 286 311 287 312 //create creteria 288 __condition_node_t waiter = { this_thread(), count, 0 };313 __condition_node_t waiter = { (thread_desc*)this_thread, count, 0 }; 289 314 290 315 __condition_criterion_t criteria[count]; 291 316 for(int i = 0; i < count; i++) { 292 317 (&criteria[i]){ this->monitors[i], &waiter }; 293 LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] );318 // LIB_DEBUG_PRINT_SAFE( "Criterion %p\n", &criteria[i] ); 294 319 push( &criteria[i].target->signal_stack, &criteria[i] ); 295 320 } … … 309 334 310 335 //Everything is ready to go to sleep 311 ScheduleInternal( locks, count, &signallee, 1 );336 BlockInternal( locks, count, &signallee, 1 ); 312 337 
313 338 … … 325 350 326 351 uintptr_t front( condition * this ) { 327 verifyf( !is_empty(this), 352 verifyf( !is_empty(this), 328 353 "Attempt to access user data on an empty condition.\n" 329 354 "Possible cause is not checking if the condition is empty before reading stored data." … … 335 360 // Internal scheduling 336 361 void __accept_internal( unsigned short count, __acceptable_t * acceptables, void (*func)(void) ) { 337 // thread_desc * this = this_thread ();362 // thread_desc * this = this_thread; 338 363 339 364 // unsigned short count = this->current_monitor_count; 340 365 // unsigned int recursions[ count ]; //Save the current recursion levels to restore them later 341 // spinlock * locks [ count ]; //We need to pass-in an array of locks to ScheduleInternal366 // spinlock * locks [ count ]; //We need to pass-in an array of locks to BlockInternal 342 367 343 368 // lock_all( this->current_monitors, locks, count ); … … 348 373 349 374 // // // Everything is ready to go to sleep 350 // // ScheduleInternal( locks, count, threads, thread_count );375 // // BlockInternal( locks, count, threads, thread_count ); 351 376 352 377 … … 393 418 static inline void lock_all( spinlock ** locks, unsigned short count ) { 394 419 for( int i = 0; i < count; i++ ) { 395 lock ( locks[i]);420 lock_yield( locks[i] DEBUG_CTX2 ); 396 421 } 397 422 } … … 400 425 for( int i = 0; i < count; i++ ) { 401 426 spinlock * l = &source[i]->lock; 402 lock ( l);427 lock_yield( l DEBUG_CTX2 ); 403 428 if(locks) locks[i] = l; 404 429 } … … 443 468 for( int i = 0; i < count; i++ ) { 444 469 445 LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target );470 // LIB_DEBUG_PRINT_SAFE( "Checking %p for %p\n", &criteria[i], target ); 446 471 if( &criteria[i] == target ) { 447 472 criteria[i].ready = true; 448 LIB_DEBUG_PRINT_SAFE( "True\n" );473 // LIB_DEBUG_PRINT_SAFE( "True\n" ); 449 474 } 450 475 … … 452 477 } 453 478 454 LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run );479 // 
LIB_DEBUG_PRINT_SAFE( "Runing %i\n", ready2run ); 455 480 return ready2run ? node->waiting_thread : NULL; 456 481 } 457 482 458 483 static inline void brand_condition( condition * this ) { 459 thread_desc * thrd = this_thread ();484 thread_desc * thrd = this_thread; 460 485 if( !this->monitors ) { 461 LIB_DEBUG_PRINT_SAFE("Branding\n");486 // LIB_DEBUG_PRINT_SAFE("Branding\n"); 462 487 assertf( thrd->current_monitors != NULL, "No current monitor to brand condition", thrd->current_monitors ); 463 488 this->monitor_count = thrd->current_monitor_count; -
src/libcfa/concurrency/preemption.c
r8b47e50 ref6851a 15 15 // 16 16 17 #include "libhdr.h" 17 18 #include "preemption.h" 18 19 19 20 extern "C" { 21 #include <errno.h> 22 #include <execinfo.h> 23 #define __USE_GNU 20 24 #include <signal.h> 21 } 22 23 #define __CFA_DEFAULT_PREEMPTION__ 10 25 #undef __USE_GNU 26 #include <stdio.h> 27 #include <string.h> 28 #include <unistd.h> 29 } 30 31 32 #ifdef __USE_STREAM__ 33 #include "fstream" 34 #endif 35 36 #define __CFA_DEFAULT_PREEMPTION__ 10000 24 37 25 38 __attribute__((weak)) unsigned int default_preemption() { … … 27 40 } 28 41 42 #define __CFA_SIGCXT__ ucontext_t * 43 #define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt 44 29 45 static void preempt( processor * this ); 30 46 static void timeout( thread_desc * this ); 31 47 48 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ); 49 void sigHandler_alarm ( __CFA_SIGPARMS__ ); 50 void sigHandler_segv ( __CFA_SIGPARMS__ ); 51 void sigHandler_abort ( __CFA_SIGPARMS__ ); 52 53 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ); 54 LIB_DEBUG_DO( bool validate( alarm_list_t * this ); ) 55 56 #ifdef __x86_64__ 57 #define CFA_REG_IP REG_RIP 58 #else 59 #define CFA_REG_IP REG_EIP 60 #endif 61 62 32 63 //============================================================================================= 33 64 // Kernel Preemption logic 34 65 //============================================================================================= 35 66 36 void kernel_start_preemption() {37 38 }39 40 67 void tick_preemption() { 68 // LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Ticking preemption\n" ); 69 41 70 alarm_list_t * alarms = &systemProcessor->alarms; 42 71 __cfa_time_t currtime = __kernel_get_time(); 43 72 while( alarms->head && alarms->head->alarm < currtime ) { 44 73 alarm_node_t * node = pop(alarms); 74 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking %p\n", node ); 75 45 76 if( 
node->kernel_alarm ) { 46 77 preempt( node->proc ); … … 50 81 } 51 82 83 verify( validate( alarms ) ); 84 52 85 if( node->period > 0 ) { 53 node->alarm +=node->period;86 node->alarm = currtime + node->period; 54 87 insert( alarms, node ); 55 88 } … … 62 95 __kernel_set_timer( alarms->head->alarm - currtime ); 63 96 } 97 98 verify( validate( alarms ) ); 99 // LIB_DEBUG_PRINT_BUFFER_LOCAL( STDERR_FILENO, "Ticking preemption done\n" ); 64 100 } 65 101 66 102 void update_preemption( processor * this, __cfa_time_t duration ) { 67 // assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 ); 103 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, "Processor : %p updating preemption to %lu\n", this, duration ); 104 68 105 alarm_node_t * alarm = this->preemption_alarm; 106 duration *= 1000; 69 107 70 108 // Alarms need to be enabled … … 89 127 } 90 128 129 //============================================================================================= 130 // Kernel Signal Tools 131 //============================================================================================= 132 133 LIB_DEBUG_DO( static thread_local void * last_interrupt = 0; ) 134 135 extern "C" { 136 void disable_interrupts() { 137 __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST ); 138 verify( new_val < (unsigned short)65_000 ); 139 verify( new_val != (unsigned short) 0 ); 140 } 141 142 void enable_interrupts_noRF() { 143 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 144 verify( prev != (unsigned short) 0 ); 145 } 146 147 void enable_interrupts( DEBUG_CTX_PARAM ) { 148 processor * proc = this_processor; 149 thread_desc * thrd = this_thread; 150 unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 151 verify( prev != (unsigned short) 0 ); 152 if( prev == 1 && proc->pending_preemption ) { 153 proc->pending_preemption = 
false; 154 BlockInternal( thrd ); 155 } 156 157 LIB_DEBUG_DO( proc->last_enable = caller; ) 158 } 159 } 160 161 static inline void signal_unblock( int sig ) { 162 sigset_t mask; 163 sigemptyset( &mask ); 164 sigaddset( &mask, sig ); 165 166 if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) { 167 abortf( "internal error, pthread_sigmask" ); 168 } 169 } 170 171 static inline void signal_block( int sig ) { 172 sigset_t mask; 173 sigemptyset( &mask ); 174 sigaddset( &mask, sig ); 175 176 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 177 abortf( "internal error, pthread_sigmask" ); 178 } 179 } 180 181 static inline bool preemption_ready() { 182 return disable_preempt_count == 0; 183 } 184 185 static inline void defer_ctxSwitch() { 186 this_processor->pending_preemption = true; 187 } 188 189 static inline void defer_alarm() { 190 systemProcessor->pending_alarm = true; 191 } 192 193 static void preempt( processor * this ) { 194 pthread_kill( this->kernel_thread, SIGUSR1 ); 195 } 196 197 static void timeout( thread_desc * this ) { 198 //TODO : implement waking threads 199 } 200 201 //============================================================================================= 202 // Kernel Signal Startup/Shutdown logic 203 //============================================================================================= 204 205 static pthread_t alarm_thread; 206 void * alarm_loop( __attribute__((unused)) void * args ); 207 208 void kernel_start_preemption() { 209 LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n"); 210 __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); 211 // __kernel_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); 212 // __kernel_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); 213 214 signal_block( SIGALRM ); 215 216 pthread_create( &alarm_thread, NULL, alarm_loop, NULL ); 217 } 218 219 void kernel_stop_preemption() { 220 sigset_t mask; 221 sigfillset( &mask ); 222 sigprocmask( SIG_BLOCK, &mask, NULL ); 223 224 
pthread_kill( alarm_thread, SIGINT ); 225 pthread_join( alarm_thread, NULL ); 226 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n"); 227 } 228 91 229 void ?{}( preemption_scope * this, processor * proc ) { 92 230 (&this->alarm){ proc }; … … 97 235 98 236 void ^?{}( preemption_scope * this ) { 237 disable_interrupts(); 238 99 239 update_preemption( this->proc, 0 ); 100 240 } 101 241 102 242 //============================================================================================= 103 // Kernel Signal logic 104 //============================================================================================= 105 106 static inline bool preemption_ready() { 107 return this_processor->disable_preempt_count == 0; 108 } 109 110 static inline void defer_ctxSwitch() { 111 this_processor->pending_preemption = true; 112 } 113 114 static inline void defer_alarm() { 115 systemProcessor->pending_alarm = true; 116 } 117 118 void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) { 243 // Kernel Signal Handlers 244 //============================================================================================= 245 246 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) { 247 LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); ) 119 248 if( preemption_ready() ) { 120 ScheduleInternal( this_processor->current_thread ); 249 signal_unblock( SIGUSR1 ); 250 BlockInternal( (thread_desc*)this_thread ); 121 251 } 122 252 else { … … 125 255 } 126 256 127 void sigHandler_alarm( __attribute__((unused)) int sig ) { 128 if( try_lock( &systemProcessor->alarm_lock ) ) { 129 tick_preemption(); 130 unlock( &systemProcessor->alarm_lock ); 131 } 132 else { 133 defer_alarm(); 134 } 135 } 136 137 static void preempt( processor * this ) { 138 pthread_kill( this->kernel_thread, SIGUSR1 ); 139 } 140 141 static void timeout( thread_desc * this ) { 142 //TODO : implement waking threads 143 } 257 // void sigHandler_alarm( __CFA_SIGPARMS__ ) { 258 // LIB_DEBUG_DO( last_interrupt = 
(void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); ) 259 // verify( this_processor == systemProcessor ); 260 261 // if( try_lock( &systemProcessor->alarm_lock DEBUG_CTX2 ) ) { 262 // tick_preemption(); 263 // systemProcessor->pending_alarm = false; 264 // unlock( &systemProcessor->alarm_lock ); 265 // } 266 // else { 267 // defer_alarm(); 268 // } 269 270 // signal_unblock( SIGALRM ); 271 272 // if( preemption_ready() && this_processor->pending_preemption ) { 273 274 // this_processor->pending_preemption = false; 275 // BlockInternal( (thread_desc*)this_thread ); 276 // } 277 // } 278 279 void * alarm_loop( __attribute__((unused)) void * args ) { 280 sigset_t mask; 281 sigemptyset( &mask ); 282 sigaddset( &mask, SIGALRM ); 283 sigaddset( &mask, SIGUSR2 ); 284 sigaddset( &mask, SIGINT ); 285 286 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 287 abortf( "internal error, pthread_sigmask" ); 288 } 289 290 while( true ) { 291 int sig; 292 if( sigwait( &mask, &sig ) != 0 ) { 293 abortf( "internal error, sigwait" ); 294 } 295 296 switch( sig) { 297 case SIGALRM: 298 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n"); 299 lock( &systemProcessor->alarm_lock DEBUG_CTX2 ); 300 tick_preemption(); 301 unlock( &systemProcessor->alarm_lock ); 302 break; 303 case SIGUSR2: 304 //TODO other actions 305 break; 306 case SIGINT: 307 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread stopping\n"); 308 return NULL; 309 default: 310 abortf( "internal error, sigwait returned sig %d", sig ); 311 break; 312 } 313 } 314 } 315 316 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) { 317 struct sigaction act; 318 319 act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler; 320 act.sa_flags = flags; 321 322 if ( sigaction( sig, &act, NULL ) == -1 ) { 323 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, 324 " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n", 325 sig, handler, flags, errno, 
strerror( errno ) 326 ); 327 _exit( EXIT_FAILURE ); 328 } 329 } 330 331 typedef void (*sa_handler_t)(int); 332 333 static void __kernel_sigdefault( int sig ) { 334 struct sigaction act; 335 336 // act.sa_handler = SIG_DFL; 337 act.sa_flags = 0; 338 sigemptyset( &act.sa_mask ); 339 340 if ( sigaction( sig, &act, NULL ) == -1 ) { 341 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, 342 " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n", 343 sig, errno, strerror( errno ) 344 ); 345 _exit( EXIT_FAILURE ); 346 } 347 } 348 349 //============================================================================================= 350 // Terminating Signals logic 351 //============================================================================================= 352 353 LIB_DEBUG_DO( 354 static void __kernel_backtrace( int start ) { 355 // skip first N stack frames 356 357 enum { Frames = 50 }; 358 void * array[Frames]; 359 int size = backtrace( array, Frames ); 360 char ** messages = backtrace_symbols( array, size ); 361 362 // find executable name 363 *index( messages[0], '(' ) = '\0'; 364 #ifdef __USE_STREAM__ 365 serr | "Stack back trace for:" | messages[0] | endl; 366 #else 367 fprintf( stderr, "Stack back trace for: %s\n", messages[0]); 368 #endif 369 370 // skip last 2 stack frames after main 371 for ( int i = start; i < size && messages != NULL; i += 1 ) { 372 char * name = NULL; 373 char * offset_begin = NULL; 374 char * offset_end = NULL; 375 376 for ( char *p = messages[i]; *p; ++p ) { 377 // find parantheses and +offset 378 if ( *p == '(' ) { 379 name = p; 380 } 381 else if ( *p == '+' ) { 382 offset_begin = p; 383 } 384 else if ( *p == ')' ) { 385 offset_end = p; 386 break; 387 } 388 } 389 390 // if line contains symbol print it 391 int frameNo = i - start; 392 if ( name && offset_begin && offset_end && name < offset_begin ) { 393 // delimit strings 394 *name++ = '\0'; 395 *offset_begin++ = '\0'; 396 *offset_end++ = '\0'; 397 398 #ifdef 
__USE_STREAM__ 399 serr | "(" | frameNo | ")" | messages[i] | ":" 400 | name | "+" | offset_begin | offset_end | endl; 401 #else 402 fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end); 403 #endif 404 } 405 // otherwise, print the whole line 406 else { 407 #ifdef __USE_STREAM__ 408 serr | "(" | frameNo | ")" | messages[i] | endl; 409 #else 410 fprintf( stderr, "(%i) %s\n", frameNo, messages[i] ); 411 #endif 412 } 413 } 414 415 free( messages ); 416 } 417 ) 418 419 // void sigHandler_segv( __CFA_SIGPARMS__ ) { 420 // LIB_DEBUG_DO( 421 // #ifdef __USE_STREAM__ 422 // serr | "*CFA runtime error* program cfa-cpp terminated with" 423 // | (sig == SIGSEGV ? "segment fault." : "bus error.") 424 // | endl; 425 // #else 426 // fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." ); 427 // #endif 428 429 // // skip first 2 stack frames 430 // __kernel_backtrace( 1 ); 431 // ) 432 // exit( EXIT_FAILURE ); 433 // } 434 435 // void sigHandler_abort( __CFA_SIGPARMS__ ) { 436 // // skip first 6 stack frames 437 // LIB_DEBUG_DO( __kernel_backtrace( 6 ); ) 438 439 // // reset default signal handler 440 // __kernel_sigdefault( SIGABRT ); 441 442 // raise( SIGABRT ); 443 // } -
src/libcfa/concurrency/thread
r8b47e50 ref6851a 54 54 } 55 55 56 thread_desc * this_thread(void);56 extern volatile thread_local thread_desc * this_thread; 57 57 58 58 forall( dtype T | is_thread(T) ) -
src/libcfa/concurrency/thread.c
r8b47e50 ref6851a 28 28 } 29 29 30 extern thread_local processor * this_processor;30 extern volatile thread_local processor * this_processor; 31 31 32 32 //----------------------------------------------------------------------------- … … 71 71 coroutine_desc* thrd_c = get_coroutine(this); 72 72 thread_desc* thrd_h = get_thread (this); 73 thrd_c->last = this_coroutine(); 74 this_processor->current_coroutine = thrd_c; 73 thrd_c->last = this_coroutine; 75 74 76 LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);75 // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h); 77 76 77 disable_interrupts(); 78 78 create_stack(&thrd_c->stack, thrd_c->stack.size); 79 this_coroutine = thrd_c; 79 80 CtxStart(this, CtxInvokeThread); 81 assert( thrd_c->last->stack.context ); 80 82 CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context ); 81 83 82 84 ScheduleThread(thrd_h); 85 enable_interrupts( DEBUG_CTX ); 83 86 } 84 87 85 88 void yield( void ) { 86 ScheduleInternal( this_processor->current_thread );89 BlockInternal( (thread_desc *)this_thread ); 87 90 } 88 91 … … 95 98 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) { 96 99 // set state of current coroutine to inactive 97 src->state = Inactive;100 src->state = src->state == Halted ? Halted : Inactive; 98 101 dst->state = Active; 99 102 … … 103 106 // set new coroutine that the processor is executing 104 107 // and context switch to it 105 this_processor->current_coroutine = dst; 108 this_coroutine = dst; 109 assert( src->stack.context ); 106 110 CtxSwitch( src->stack.context, dst->stack.context ); 107 this_ processor->current_coroutine = src;111 this_coroutine = src; 108 112 109 113 // set state of new coroutine to active 110 dst->state = Inactive;114 dst->state = dst->state == Halted ? Halted : Inactive; 111 115 src->state = Active; 112 116 } -
src/libcfa/libhdr/libalign.h
r8b47e50 ref6851a 1 // -*- Mode: C++ -*- 1 // -*- Mode: C++ -*- 2 2 // 3 3 // Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo … … 18 18 // Free Software Foundation; either version 2.1 of the License, or (at your 19 19 // option) any later version. 20 // 20 // 21 21 // This library is distributed in the hope that it will be useful, but WITHOUT 22 22 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 23 23 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License 24 24 // for more details. 25 // 25 // 26 26 // You should have received a copy of the GNU Lesser General Public License 27 27 // along with this library. 28 // 28 // 29 29 30 30 … … 33 33 34 34 #include "assert" 35 #include <stdbool.h> 35 36 36 // Minimum size used to align memory boundaries for memory allocations. 37 // Minimum size used to align memory boundaries for memory allocations. 37 38 #define libAlign() (sizeof(double)) 38 39 -
src/libcfa/libhdr/libdebug.h
r8b47e50 ref6851a 18 18 19 19 #ifdef __CFA_DEBUG__ 20 #define LIB_DEBUG_DO(x) x 21 #define LIB_NO_DEBUG_DO(x) ((void)0) 20 #define LIB_DEBUG_DO(...) __VA_ARGS__ 21 #define LIB_NO_DEBUG_DO(...) 22 #define DEBUG_CTX __PRETTY_FUNCTION__ 23 #define DEBUG_CTX2 , __PRETTY_FUNCTION__ 24 #define DEBUG_CTX_PARAM const char * caller 25 #define DEBUG_CTX_PARAM2 , const char * caller 22 26 #else 23 #define LIB_DEBUG_DO(x) ((void)0) 24 #define LIB_NO_DEBUG_DO(x) x 27 #define LIB_DEBUG_DO(...) 28 #define LIB_NO_DEBUG_DO(...) __VA_ARGS__ 29 #define DEBUG_CTX 30 #define DEBUG_CTX2 31 #define DEBUG_CTX_PARAM 32 #define DEBUG_CTX_PARAM2 25 33 #endif 26 34 … … 51 59 52 60 #ifdef __CFA_DEBUG_PRINT__ 53 #define LIB_DEBUG_WRITE( fd, buffer, len ) __lib_debug_write( fd, buffer, len ) 54 #define LIB_DEBUG_ACQUIRE() __lib_debug_acquire() 55 #define LIB_DEBUG_RELEASE() __lib_debug_release() 56 #define LIB_DEBUG_PRINT_SAFE(...) __lib_debug_print_safe (__VA_ARGS__) 57 #define LIB_DEBUG_PRINT_NOLOCK(...) __lib_debug_print_nolock (__VA_ARGS__) 58 #define LIB_DEBUG_PRINT_BUFFER(...) __lib_debug_print_buffer (__VA_ARGS__) 61 #define LIB_DEBUG_WRITE( fd, buffer, len ) __lib_debug_write( fd, buffer, len ) 62 #define LIB_DEBUG_ACQUIRE() __lib_debug_acquire() 63 #define LIB_DEBUG_RELEASE() __lib_debug_release() 64 #define LIB_DEBUG_PRINT_SAFE(...) __lib_debug_print_safe (__VA_ARGS__) 65 #define LIB_DEBUG_PRINT_NOLOCK(...) __lib_debug_print_nolock (__VA_ARGS__) 66 #define LIB_DEBUG_PRINT_BUFFER(...) __lib_debug_print_buffer (__VA_ARGS__) 67 #define LIB_DEBUG_PRINT_BUFFER_DECL(fd, ...) char text[256]; int len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len ); 68 #define LIB_DEBUG_PRINT_BUFFER_LOCAL(fd, ...) len = snprintf( text, 256, __VA_ARGS__ ); __lib_debug_write( fd, text, len ); 59 69 #else 60 #define LIB_DEBUG_WRITE(...) ((void)0) 61 #define LIB_DEBUG_ACQUIRE() ((void)0) 62 #define LIB_DEBUG_RELEASE() ((void)0) 63 #define LIB_DEBUG_PRINT_SAFE(...) 
((void)0) 64 #define LIB_DEBUG_PRINT_NOLOCK(...) ((void)0) 65 #define LIB_DEBUG_PRINT_BUFFER(...) ((void)0) 70 #define LIB_DEBUG_WRITE(...) ((void)0) 71 #define LIB_DEBUG_ACQUIRE() ((void)0) 72 #define LIB_DEBUG_RELEASE() ((void)0) 73 #define LIB_DEBUG_PRINT_SAFE(...) ((void)0) 74 #define LIB_DEBUG_PRINT_NOLOCK(...) ((void)0) 75 #define LIB_DEBUG_PRINT_BUFFER(...) ((void)0) 76 #define LIB_DEBUG_PRINT_BUFFER_DECL(...) ((void)0) 77 #define LIB_DEBUG_PRINT_BUFFER_LOCAL(...) ((void)0) 66 78 #endif 67 79 -
src/tests/.expect/concurrent/sched-int-disjoint.txt
r8b47e50 ref6851a 9 9 9000 10 10 10000 11 1100012 1200013 1300014 1400015 1500016 1600017 1700018 1800019 1900020 2000021 2100022 2200023 2300024 2400025 2500026 2600027 2700028 2800029 2900030 3000031 3100032 3200033 3300034 3400035 3500036 3600037 3700038 3800039 3900040 4000041 4100042 4200043 4300044 4400045 4500046 4600047 4700048 4800049 4900050 5000051 5100052 5200053 5300054 5400055 5500056 5600057 5700058 5800059 5900060 6000061 6100062 6200063 6300064 6400065 6500066 6600067 6700068 6800069 6900070 7000071 7100072 7200073 7300074 7400075 7500076 7600077 7700078 7800079 7900080 8000081 8100082 8200083 8300084 8400085 8500086 8600087 8700088 8800089 8900090 9000091 9100092 9200093 9300094 9400095 9500096 9600097 9700098 9800099 99000100 100000101 11 All waiter done -
src/tests/preempt_longrun/Makefile.am
r8b47e50 ref6851a 17 17 repeats=10 18 18 max_time=30 19 preempt=1 0_000ul19 preempt=1_000ul 20 20 21 21 REPEAT = ${abs_top_srcdir}/tools/repeat -s … … 25 25 CC = @CFA_BINDIR@/@CFA_NAME@ 26 26 27 TESTS = barge block create disjoint processor stack wait yield27 TESTS = barge block create disjoint enter enter3 processor stack wait yield 28 28 29 29 .INTERMEDIATE: ${TESTS} -
src/tests/preempt_longrun/Makefile.in
r8b47e50 ref6851a 450 450 repeats = 10 451 451 max_time = 30 452 preempt = 1 0_000ul452 preempt = 1_000ul 453 453 REPEAT = ${abs_top_srcdir}/tools/repeat -s 454 454 BUILD_FLAGS = -g -Wall -Wno-unused-function -quiet @CFA_FLAGS@ -debug -O2 -DPREEMPTION_RATE=${preempt} 455 TESTS = barge block create disjoint processor stack wait yield455 TESTS = barge block create disjoint enter enter3 processor stack wait yield 456 456 all: all-am 457 457 … … 663 663 $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ 664 664 "$$tst" $(AM_TESTS_FD_REDIRECT) 665 enter.log: enter 666 @p='enter'; \ 667 b='enter'; \ 668 $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ 669 --log-file $$b.log --trs-file $$b.trs \ 670 $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ 671 "$$tst" $(AM_TESTS_FD_REDIRECT) 672 enter3.log: enter3 673 @p='enter3'; \ 674 b='enter3'; \ 675 $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ 676 --log-file $$b.log --trs-file $$b.trs \ 677 $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ 678 "$$tst" $(AM_TESTS_FD_REDIRECT) 665 679 processor.log: processor 666 680 @p='processor'; \ -
src/tests/preempt_longrun/create.c
r8b47e50 ref6851a 10 10 } 11 11 12 thread Worker{};12 thread worker_t {}; 13 13 14 void main( Worker* this) {}14 void main(worker_t * this) {} 15 15 16 16 int main(int argc, char* argv[]) { 17 for(int i = 0; i < 10 0_000ul; i++) {18 Worker w;17 for(int i = 0; i < 10_000ul; i++) { 18 worker_t w[7]; 19 19 } 20 20 } -
src/tests/preempt_longrun/processor.c
r8b47e50 ref6851a 10 10 } 11 11 12 thread Worker{};12 thread worker_t {}; 13 13 14 void main( Worker* this) {}14 void main(worker_t * this) {} 15 15 16 16 int main(int argc, char* argv[]) { 17 for(int i = 0; i < 10 0_000ul; i++) {17 for(int i = 0; i < 10_000ul; i++) { 18 18 processor p; 19 19 } -
src/tests/preempt_longrun/stack.c
r8b47e50 ref6851a 12 12 } 13 13 14 thread Worker{};14 thread worker_t {}; 15 15 16 void main( Worker* this) {16 void main(worker_t * this) { 17 17 volatile long p = 5_021_609ul; 18 18 volatile long a = 326_417ul; 19 19 volatile long n = 1l; 20 for (volatile long i = 0; i < p; i++) { 21 n *= a; 22 n %= p; 20 for (volatile long i = 0; i < p; i++) { 21 n *= a; 22 n %= p; 23 23 } 24 24 25 25 if( n != a ) { 26 26 abort(); … … 28 28 } 29 29 30 extern "C" { 31 static worker_t * workers; 32 } 33 30 34 int main(int argc, char* argv[]) { 31 35 processor p; 32 36 { 33 Worker w[7]; 37 worker_t w[7]; 38 workers = w; 34 39 } 35 40 } -
src/tests/preempt_longrun/yield.c
r8b47e50 ref6851a 10 10 } 11 11 12 thread Worker{};12 thread worker_t {}; 13 13 14 void main( Worker* this) {15 for(int i = 0; i < 100_000ul; i++) {14 void main(worker_t * this) { 15 for(int i = 0; i < 325_000ul; i++) { 16 16 yield(); 17 17 } 18 } 19 20 extern "C" { 21 static worker_t * workers; 18 22 } 19 23 … … 21 25 processor p; 22 26 { 23 Worker w[7]; 27 worker_t w[7]; 28 workers = w; 24 29 } 25 30 } -
src/tests/sched-int-block.c
r8b47e50 ref6851a 6 6 7 7 #ifndef N 8 #define N 10 0_0008 #define N 10_000 9 9 #endif 10 10 … … 31 31 //------------------------------------------------------------------------------ 32 32 void wait_op( global_data_t * mutex a, global_data_t * mutex b, unsigned i ) { 33 wait( &cond, (uintptr_t)this_thread ());33 wait( &cond, (uintptr_t)this_thread ); 34 34 35 35 yield( ((unsigned)rand48()) % 10 ); … … 40 40 } 41 41 42 a->last_thread = b->last_thread = this_thread ();42 a->last_thread = b->last_thread = this_thread; 43 43 44 44 yield( ((unsigned)rand48()) % 10 ); … … 56 56 yield( ((unsigned)rand48()) % 10 ); 57 57 58 a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread ();58 a->last_thread = b->last_thread = a->last_signaller = b->last_signaller = this_thread; 59 59 60 60 if( !is_empty( &cond ) ) { … … 86 86 //------------------------------------------------------------------------------ 87 87 void barge_op( global_data_t * mutex a ) { 88 a->last_thread = this_thread ();88 a->last_thread = this_thread; 89 89 } 90 90 -
src/tests/sched-int-disjoint.c
r8b47e50 ref6851a 5 5 6 6 #ifndef N 7 #define N 10 0_0007 #define N 10_000 8 8 #endif 9 9 … … 42 42 43 43 void main( Barger * this ) { 44 while( !all_done ) { 44 while( !all_done ) { 45 45 barge( &data ); 46 yield(); 46 yield(); 47 47 } 48 48 } … … 53 53 wait( &cond ); 54 54 if( d->state != SIGNAL ) { 55 sout | "ERROR barging!" | endl; 55 sout | "ERROR barging!" | endl; 56 56 } 57 57 … … 85 85 bool running = data.counter < N && data.counter > 0; 86 86 if( data.state != SIGNAL && running ) { 87 sout | "ERROR Eager signal" | data.state | endl; 87 sout | "ERROR Eager signal" | data.state | endl; 88 88 } 89 89 } … … 92 92 93 93 void main( Signaller * this ) { 94 while( !all_done ) { 94 while( !all_done ) { 95 95 logic( &mut ); 96 yield(); 96 yield(); 97 97 } 98 98 } … … 111 111 sout | "All waiter done" | endl; 112 112 all_done = true; 113 } 113 } 114 114 } -
src/tests/sched-int-wait.c
r8b47e50 ref6851a 50 50 unsigned action = (unsigned)rand48() % 4; 51 51 switch( action ) { 52 case 0: 52 case 0: 53 53 signal( &condABC, &globalA, &globalB, &globalC ); 54 54 break; 55 case 1: 55 case 1: 56 56 signal( &condAB , &globalA, &globalB ); 57 57 break; 58 case 2: 58 case 2: 59 59 signal( &condBC , &globalB, &globalC ); 60 60 break; 61 case 3: 61 case 3: 62 62 signal( &condAC , &globalA, &globalC ); 63 63 break; … … 67 67 } 68 68 yield(); 69 } 69 } 70 70 } 71 71 -
src/tests/thread.c
r8b47e50 ref6851a 4 4 #include <thread> 5 5 6 // thread First;7 // void main(First* this);6 thread First { semaphore* lock; }; 7 thread Second { semaphore* lock; }; 8 8 9 // thread Second; 10 // void main(Second* this); 11 12 thread First { signal_once* lock; }; 13 thread Second { signal_once* lock; }; 14 15 void ?{}( First * this, signal_once* lock ) { this->lock = lock; } 16 void ?{}( Second * this, signal_once* lock ) { this->lock = lock; } 9 void ?{}( First * this, semaphore* lock ) { this->lock = lock; } 10 void ?{}( Second * this, semaphore* lock ) { this->lock = lock; } 17 11 18 12 void main(First* this) { … … 21 15 yield(); 22 16 } 23 signal(this->lock);17 V(this->lock); 24 18 } 25 19 26 20 void main(Second* this) { 27 wait(this->lock);21 P(this->lock); 28 22 for(int i = 0; i < 10; i++) { 29 23 sout | "Second : Suspend No." | i + 1 | endl; … … 34 28 35 29 int main(int argc, char* argv[]) { 36 s ignal_once lock;30 semaphore lock = { 0 }; 37 31 sout | "User main begin" | endl; 38 32 {
Note: See TracChangeset
for help on using the changeset viewer.