Changeset b826e6b for src/libcfa/concurrency
- Timestamp:
- Jul 19, 2017, 11:49:33 AM
- Branches:
- ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
- Children:
- 9cc0472
- Parents:
- fea3faa (diff), a57cb58 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- src/libcfa/concurrency
- Files:
- 16 edited
src/libcfa/concurrency/CtxSwitch-i386.S
Removed the CtxGet assembly routine (the .globl CtxGet block that stored %esp and %ebp into SP_OFFSET/FP_OFFSET of its argument); context capture is now done by an inline asm macro in invoke.h.
src/libcfa/concurrency/CtxSwitch-x86_64.S
Removed the matching x86-64 CtxGet assembly routine (storing %rsp/%rbp through %rdi); the remaining hunks are trailing-whitespace cleanup in the licence header and around CtxInvokeStub.
src/libcfa/concurrency/alarm.c
- New includes: errno.h, stdio.h, string.h, unistd.h and libhdr.h.
- New "time type" section: nanosecond constants (one_second, one_milisecond, one_microsecond, one_nanosecond), a zero_time constant, constructors for __cfa_time_t from nothing, from zero_t and from a timespec, an itimerval constructor from a __cfa_time_t, assignment from zero_t, and from_s/from_ms/from_us/from_ns conversion helpers.
- __kernel_get_time now builds a __cfa_time_t directly from the timespec, and __kernel_set_timer fills the itimerval through the new constructor instead of open-coded TIMEGRAN arithmetic.
- The alarm_node_t constructors default their alarm and period parameters to zero_time instead of 0.
- A debug-only validate() walks the list and checks that the tail pointer is consistent; insert_at, insert, pop, remove_at and remove now verify() list integrity, and remove_at/remove compare the iterator slot against the node itself rather than against its next field.
- register_self and unregister_self operate on event_kernel->alarms under event_kernel->lock (instead of the systemProcessor alarm list), re-arm the kernel timer when the inserted node is the first pending alarm, pass DEBUG_CTX information to lock(), and unregister_self now re-enables interrupts on exit (the old version called disable_interrupts() twice).
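For clarity, here is a minimal C sketch of the re-arming logic now used by register_self: the kernel timer only needs to be (re)programmed when the inserted node is the first pending alarm. The set_timer/get_time names are stand-ins for __kernel_set_timer/__kernel_get_time, and the event_kernel locking and interrupt disabling of the real code are omitted.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct alarm_node {
        uint64_t alarm;                     // absolute expiry time, in nanoseconds
        struct alarm_node * next;
    } alarm_node;

    typedef struct { alarm_node * head; } alarm_list;

    extern uint64_t get_time( void );           // stand-in for __kernel_get_time
    extern void set_timer( uint64_t delta );    // stand-in for __kernel_set_timer

    // keep the list sorted by expiry time
    static void insert( alarm_list * list, alarm_node * n ) {
        alarm_node ** it = &list->head;
        while( *it && (*it)->alarm <= n->alarm ) it = &(*it)->next;
        n->next = *it;
        *it = n;
    }

    static void register_alarm( alarm_list * list, alarm_node * n ) {
        bool first = !list->head;               // was anything already pending?
        insert( list, n );
        if( first ) set_timer( list->head->alarm - get_time() );   // arm the timer only for the first alarm
    }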
src/libcfa/concurrency/alarm.h
- Adds <stdint.h>; drops the typedef unsigned long int __cfa_time_t and the TIMEGRAN granularity macro.
- __cfa_time_t is now a struct wrapping a uint64_t nanosecond count, with constructors (default, zero_t, timespec), an itimerval constructor, assignment from zero_t, comparison operators against both __cfa_time_t and zero_t, addition and subtraction, from_s/from_ms/from_us/from_ns conversions, and an extern zero_time constant.
- Forward declarations are added for timespec and itimerval, and the alarm_node_t constructors now default alarm and period to zero_time.
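As a rough illustration of how the new type reads at a use site, here is a plain-C approximation of the struct wrapper and a few of its helpers; the real header spells the operators as CFA overloads such as ?<? and ?+?, so the function names below are illustrative only.

    #include <stdint.h>

    typedef struct { uint64_t val; } cfa_time;                    // nanoseconds, mirrors __cfa_time_t

    static inline cfa_time from_ms( uint64_t v ) { return (cfa_time){ v * 1000000ull }; }
    static inline cfa_time time_add( cfa_time a, cfa_time b ) { return (cfa_time){ a.val + b.val }; }
    static inline int      time_lt ( cfa_time a, cfa_time b ) { return a.val < b.val; }

    // e.g. computing an absolute deadline 10 ms past "now":
    //     cfa_time deadline = time_add( now, from_ms( 10 ) );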
src/libcfa/concurrency/coroutine
The this_coroutine() accessor function is replaced by an extern volatile thread_local coroutine_desc * this_coroutine variable; suspend() and both resume() overloads now read the variable directly.
src/libcfa/concurrency/coroutine.c
- The this_processor extern is now volatile thread_local.
- The default coroutine stack size grows from 10240 to 65000 bytes.
- In the context-switch path, the source coroutine is only marked Inactive if it is not already Halted, the global this_coroutine variable is updated instead of this_processor->current_coroutine, and src->stack.context is asserted non-null before calling CtxSwitch.
src/libcfa/concurrency/invoke.c
- The extern for __leave_monitor_desc( monitor_desc * ) becomes __leave_thread_monitor( thread_desc * ), and disable_interrupts/enable_interrupts are declared.
- CtxInvokeThread is documented and restructured: it first suspends (after which the pointer to main may safely be invalidated), fetches the thread handle, enables interrupts to officially start the thread, runs main, and then exits through __leave_thread_monitor, which marks the thread halted, leaves its monitor, disables interrupts and performs the final suspend; the comment stresses that the order of these four steps matters. Control must never return, hence the abortf("Resumed dead thread").
- The CtxStart hunks are whitespace cleanup only.
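The new thread start/exit protocol can be summarized with the following C-style outline; the extern stand-ins correspond to kernel primitives named in the diff, while the DEBUG_CTX arguments and exact CFA signatures are elided.

    struct thread_desc;

    extern void __suspend_internal( void );
    extern void enable_interrupts( void );
    extern void __leave_thread_monitor( struct thread_desc * );
    extern void abortf( const char * fmt, ... );

    void invoke_thread( void (*main)( void * ),
                        struct thread_desc * (*get_thread)( void * ),
                        void * this ) {
        __suspend_internal();              // park until first resume; main's pointer may be invalidated afterwards
        struct thread_desc * thrd = get_thread( this );
        enable_interrupts();               // the thread officially starts: preemption is now allowed
        main( this );                      // run the user's thread main
        __leave_thread_monitor( thrd );    // mark halted, leave the monitor, disable interrupts, final suspend
        abortf( "Resumed dead thread" );   // must never be reached
    }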
src/libcfa/concurrency/invoke.h
- spinlock gains debug-only prev_name and prev_thrd fields recording who last acquired it.
- monitor_desc loses its stack_owner field.
- The external CtxGet routine declaration is replaced by architecture-specific inline asm macros (x86-64 and i386) that copy the current stack and frame pointers into ctx.SP and ctx.FP.
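A minimal usage sketch of the new CtxGet macro (x86-64 variant, copied from the diff) shows how callers such as the current_stack_info_t constructor in kernel.c capture the live stack and frame pointers:

    struct machine_context { void * SP; void * FP; };

    #define CtxGet( ctx ) __asm__ ( \
        "movq %%rsp,%0\n" \
        "movq %%rbp,%1\n" \
        : "=rm" ((ctx).SP), "=rm" ((ctx).FP) )

    void capture_example( void ) {
        struct machine_context ctx;
        CtxGet( ctx );                       // ctx.SP/ctx.FP now describe this frame
    }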
src/libcfa/concurrency/kernel
- The spinlock API is now lock(), lock_yield(), try_lock() and unlock(), the first three taking DEBUG_CTX_PARAM2; each is documented (lock spins, lock_yield yields repeatedly, try_lock returns false if already held).
- signal_once is replaced by a counting semaphore (spinlock, count, waiting queue) with constructors and P()/V().
- cluster: the lock is renamed ready_queue_lock and a preemption rate field is added.
- processor is reorganized and commented: current_coroutine, current_thread, preemption and disable_preempt_count are gone; it now carries a do_terminate flag plus a terminated semaphore for shutdown, the preemption_alarm node and pending_preemption flag for preemption, and a debug-only last_enable field naming the last function to enable preemption.
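The semaphore is what makes the new termination handshake work: the processor destructor requests termination and waits, and the run loop posts when it is done. A rough C analogue, using a POSIX sem_t in place of the CFA semaphore (P corresponds to sem_wait, V to sem_post):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdbool.h>

    struct proc {
        pthread_t kernel_thread;
        volatile bool do_terminate;
        sem_t terminated;                        // initialised to 0, like (&this->terminated){ 0 }
    };

    static void * run_loop( void * arg ) {
        struct proc * this = arg;
        while( !this->do_terminate ) { /* pull and run ready threads */ }
        sem_post( &this->terminated );           // V: the run loop acknowledges termination
        return NULL;
    }

    static void finish_proc( struct proc * this ) {
        this->do_terminate = true;               // request termination
        sem_wait( &this->terminated );           // P: wait until the run loop is done
        pthread_join( this->kernel_thread, NULL );
    }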
src/libcfa/concurrency/kernel.c
- Include order reworked: libhdr.h comes first, kernel_private.h/preemption.h/startup.h are grouped with the other CFA includes, and the constructor/destructor declarations for kernel_startup/kernel_shutdown move below the includes.
- The KERNEL_STORAGE macro moves to kernel_private.h (storage_##X naming); the system* storage and globals become mainCluster, mainProcessor (a plain processor, system_proc_t is gone), the main processor context and mainThread.
- The accessor functions this_coroutine() and this_thread() are replaced by volatile thread_local variables this_coroutine, this_thread and this_processor, plus new thread-locals preemption_in_progress and disable_preempt_count (which now lives per kernel thread rather than in the processor).
- The current_stack_info_t constructor uses the new CtxGet macro and storage_mainThreadCtx.
- The processor constructors initialize the terminated semaphore to 0, the do_terminate flag, preemption_alarm and pending_preemption; the variant used for the main processor also records pthread_self(), and the system_proc_t constructor is deleted. The destructor sets do_terminate, P()s the terminated semaphore and pthread_join()s the kernel thread. The cluster constructor initializes ready_queue_lock and the cluster preemption rate.
- The processor main loop and runThread verify disable_preempt_count > 0 around every thread run, update this_thread instead of this->current_thread, and announce termination with V( &this->terminated ). CtxInvokeProcessor seeds this_processor/this_coroutine/this_thread and sets disable_preempt_count = 1 before building the processor context.
- ScheduleThread asserts its argument is non-null and not Halted, verifies preemption is disabled, and queues under this_processor->cltr->ready_queue_lock; nextThread takes the same lock with the same verifications.
- The ScheduleInternal family is renamed BlockInternal (plain, spinlock, thread, spinlock+thread, lock array, lock+thread arrays) and a new LeaveThread( spinlock *, thread_desc * ) is added; every BlockInternal variant now disables interrupts, records the finish action, verifies disable_preempt_count around the suspend, and re-enables interrupts when the thread resumes.
- kernel_startup constructs mainThread, mainCluster and mainProcessor (with its context), seeds the thread-local globals, starts preemption, schedules mainThread, resumes mainProcessor->runner to capture the UNIX context, and finally enables interrupts; kernel_shutdown disables interrupts, sets mainProcessor->do_terminate, suspends, stops preemption, and destroys the main processor and main thread in reverse order of construction.
- kernel_abort and the debug-output lock/unlock use DEBUG_CTX2 and the this_thread/this_coroutine variables.
- lock() and try_lock() take DEBUG_CTX_PARAM2 and, in debug builds, record the caller name and thread in the spinlock; a new lock_yield() yields between test-and-set attempts.
- signal_once and its wait/signal are replaced by the semaphore implementation: P() decrements the count and, if negative, appends the current thread to the waiting queue and blocks through BlockInternal( &this->lock ); V() increments the count and wakes the head of the queue with WakeThread.
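All BlockInternal variants now share the same shape, sketched below in C grounded in the diff; current_finish() is a hypothetical accessor standing in for this_processor->finish, and the verify()/DEBUG_CTX plumbing is dropped.

    struct spinlock;
    enum finish_code { No_Action, Release, Schedule, Release_Schedule };
    struct finish_action { enum finish_code action_code; struct spinlock * lock; };

    extern void disable_interrupts( void );
    extern void enable_interrupts( void );
    extern void suspend( void );
    extern struct finish_action * current_finish( void );   // hypothetical: this_processor->finish

    // Block the current thread; the processor releases 'lock' on its behalf after the switch
    void block_internal_release( struct spinlock * lock ) {
        disable_interrupts();                    // no preemption while the finish action is recorded
        current_finish()->action_code = Release;
        current_finish()->lock = lock;
        suspend();                               // switch back to the processor coroutine
        enable_interrupts();                     // only runs once this thread is scheduled again
    }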
src/libcfa/concurrency/kernel_private.h
- libhdr.h is included before the kernel headers.
- C-linkage declarations are added for disable_interrupts(), enable_interrupts_noPoll() and enable_interrupts( DEBUG_CTX_PARAM ); the old static inline implementations built on this_processor->disable_preempt_count are removed.
- A WakeThread() helper wraps ScheduleThread() between disable_interrupts/enable_interrupts.
- The ScheduleInternal declarations become the BlockInternal family plus LeaveThread, documented as blocking the current thread and releasing/waking the listed resources.
- system_proc_t is replaced by event_kernel_t (an alarm list and its lock) with an extern event_kernel handle, and the externs for the thread-local globals (this_processor, this_coroutine, this_thread, preemption_in_progress, disable_preempt_count) are declared here.
- The KERNEL_STORAGE(T,X) macro moves here under a new Utils section, now naming its buffer storage_##X.
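The new WakeThread helper (transcribed from the diff, with the DEBUG_CTX argument elided) shows the intended pairing of scheduling with interrupt control:

    struct thread_desc;

    extern void disable_interrupts( void );
    extern void enable_interrupts( void );
    extern void ScheduleThread( struct thread_desc * );

    static inline void WakeThread( struct thread_desc * thrd ) {
        if( !thrd ) return;                 // waking a null thread is a no-op

        disable_interrupts();               // ScheduleThread requires preemption to be disabled
        ScheduleThread( thrd );
        enable_interrupts();
    }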
src/libcfa/concurrency/monitor
The monitor_desc constructor no longer initializes stack_owner, which was removed from the struct.
src/libcfa/concurrency/monitor.c
- libhdr.h is included ahead of kernel_private.h.
- __enter_monitor_desc and __leave_monitor_desc take the monitor spinlock with lock_yield( ... DEBUG_CTX2 ), use the this_thread variable, block through BlockInternal( &this->lock ) instead of ScheduleInternal, and wake the next owner with WakeThread; most LIB_DEBUG_PRINT_SAFE calls are commented out.
- A new __leave_thread_monitor( thread_desc * ) handles thread termination: it takes the thread's monitor lock, disables interrupts, marks the coroutine Halted, decrements the recursion count, selects the next owner and hands both the lock and the new owner to LeaveThread.
- The monitor guard constructor/destructor, wait, signal, signal_block, lock_all and brand_condition all switch from the this_thread() call to the this_thread variable (cast to thread_desc * where a non-volatile pointer is needed), use lock_yield with DEBUG_CTX2, and block through BlockInternal; the remaining hunks comment out debug prints and rename ScheduleInternal to BlockInternal in comments.
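Monitor entry and exit now take their spinlocks with lock_yield rather than lock; for reference, its shape as added in the kernel.c hunk above (slightly simplified, DEBUG_CTX bookkeeping omitted) is:

    extern void yield( void );

    struct spinlock { volatile int lock; };

    void lock_yield( struct spinlock * this ) {
        for ( ;; ) {
            if ( this->lock == 0 && __sync_lock_test_and_set( &this->lock, 1 ) == 0 ) break;
            yield();                         // give up the processor instead of spinning
        }
    }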
src/libcfa/concurrency/preemption.c
rfea3faa rb826e6b 15 15 // 16 16 17 #include "libhdr.h" 17 18 #include "preemption.h" 18 19 19 20 extern "C" { 21 #include <errno.h> 22 #include <execinfo.h> 23 #define __USE_GNU 20 24 #include <signal.h> 21 } 22 23 #define __CFA_DEFAULT_PREEMPTION__ 10 24 25 #undef __USE_GNU 26 #include <stdio.h> 27 #include <string.h> 28 #include <unistd.h> 29 } 30 31 32 #ifdef __USE_STREAM__ 33 #include "fstream" 34 #endif 35 36 //TODO move to defaults 37 #define __CFA_DEFAULT_PREEMPTION__ 10000 38 39 //TODO move to defaults 25 40 __attribute__((weak)) unsigned int default_preemption() { 26 41 return __CFA_DEFAULT_PREEMPTION__; 27 42 } 28 43 44 // Short hands for signal context information 45 #define __CFA_SIGCXT__ ucontext_t * 46 #define __CFA_SIGPARMS__ __attribute__((unused)) int sig, __attribute__((unused)) siginfo_t *sfp, __attribute__((unused)) __CFA_SIGCXT__ cxt 47 48 // FwdDeclarations : timeout handlers 29 49 static void preempt( processor * this ); 30 50 static void timeout( thread_desc * this ); 31 51 52 // FwdDeclarations : Signal handlers 53 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ); 54 void sigHandler_segv ( __CFA_SIGPARMS__ ); 55 void sigHandler_abort ( __CFA_SIGPARMS__ ); 56 57 // FwdDeclarations : sigaction wrapper 58 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ); 59 60 // FwdDeclarations : alarm thread main 61 void * alarm_loop( __attribute__((unused)) void * args ); 62 63 // Machine specific register name 64 #ifdef __x86_64__ 65 #define CFA_REG_IP REG_RIP 66 #else 67 #define CFA_REG_IP REG_EIP 68 #endif 69 70 KERNEL_STORAGE(event_kernel_t, event_kernel); // private storage for event kernel 71 event_kernel_t * event_kernel; // kernel public handle to even kernel 72 static pthread_t alarm_thread; // pthread handle to alarm thread 73 74 void ?{}(event_kernel_t * this) { 75 (&this->alarms){}; 76 (&this->lock){}; 77 } 78 32 79 //============================================================================================= 33 80 // Kernel Preemption logic 34 81 //============================================================================================= 35 82 36 void kernel_start_preemption() { 37 38 } 39 83 // Get next expired node 84 static inline alarm_node_t * get_expired( alarm_list_t * alarms, __cfa_time_t currtime ) { 85 if( !alarms->head ) return NULL; // If no alarms return null 86 if( alarms->head->alarm >= currtime ) return NULL; // If alarms head not expired return null 87 return pop(alarms); // Otherwise just pop head 88 } 89 90 // Tick one frame of the Discrete Event Simulation for alarms 40 91 void tick_preemption() { 41 alarm_list_t * alarms = &systemProcessor->alarms; 42 __cfa_time_t currtime = __kernel_get_time(); 43 while( alarms->head && alarms->head->alarm < currtime ) { 44 alarm_node_t * node = pop(alarms); 92 alarm_node_t * node = NULL; // Used in the while loop but cannot be declared in the while condition 93 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading 94 __cfa_time_t currtime = __kernel_get_time(); // Check current time once so we everything "happens at once" 95 96 //Loop throught every thing expired 97 while( node = get_expired( alarms, currtime ) ) { 98 99 // Check if this is a kernel 45 100 if( node->kernel_alarm ) { 46 101 preempt( node->proc ); … … 50 105 } 51 106 52 if( node->period > 0 ) { 53 node->alarm += node->period; 54 insert( alarms, node ); 107 // Check if this is a periodic alarm 108 __cfa_time_t period = node->period; 109 if( period > 0 ) { 110 node->alarm = currtime 
+ period; // Alarm is periodic, add currtime to it (used cached current time) 111 insert( alarms, node ); // Reinsert the node for the next time it triggers 55 112 } 56 113 else { 57 node->set = false; 58 } 59 } 60 61 if( alarms->head ) {62 __kernel_set_timer( alarms->head->alarm - currtime );63 64 } 65 114 node->set = false; // Node is one-shot, just mark it as not pending 115 } 116 } 117 118 // If there are still alarms pending, reset the timer 119 if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); } 120 } 121 122 // Update the preemption of a processor and notify interested parties 66 123 void update_preemption( processor * this, __cfa_time_t duration ) { 67 // assert( THREAD_GETMEM( disableInt ) && THREAD_GETMEM( disableIntCnt ) == 1 );68 124 alarm_node_t * alarm = this->preemption_alarm; 69 125 … … 89 145 } 90 146 147 //============================================================================================= 148 // Kernel Signal Tools 149 //============================================================================================= 150 151 LIB_DEBUG_DO( static thread_local void * last_interrupt = 0; ) 152 153 extern "C" { 154 // Disable interrupts by incrementing the counter 155 void disable_interrupts() { 156 __attribute__((unused)) unsigned short new_val = __atomic_add_fetch_2( &disable_preempt_count, 1, __ATOMIC_SEQ_CST ); 157 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 158 } 159 160 // Enable interrupts by decrementing the counter 161 // If counter reaches 0, execute any pending CtxSwitch 162 void enable_interrupts( DEBUG_CTX_PARAM ) { 163 processor * proc = this_processor; // Cache the processor now since interrupts can start happening after the atomic add 164 thread_desc * thrd = this_thread; // Cache the thread now since interrupts can start happening after the atomic add 165 166 unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 167 verify( prev != 0u ); // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 168 169 // Check if we need to prempt the thread because an interrupt was missed 170 if( prev == 1 && proc->pending_preemption ) { 171 proc->pending_preemption = false; 172 BlockInternal( thrd ); 173 } 174 175 // For debugging purposes : keep track of the last person to enable the interrupts 176 LIB_DEBUG_DO( proc->last_enable = caller; ) 177 } 178 179 // Disable interrupts by incrementint the counter 180 // Don't execute any pending CtxSwitch even if counter reaches 0 181 void enable_interrupts_noPoll() { 182 __attribute__((unused)) unsigned short prev = __atomic_fetch_add_2( &disable_preempt_count, -1, __ATOMIC_SEQ_CST ); 183 verify( prev != 0u ); // If this triggers someone is enabled already enabled interrupts 184 } 185 } 186 187 // sigprocmask wrapper : unblock a single signal 188 static inline void signal_unblock( int sig ) { 189 sigset_t mask; 190 sigemptyset( &mask ); 191 sigaddset( &mask, sig ); 192 193 if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) { 194 abortf( "internal error, pthread_sigmask" ); 195 } 196 } 197 198 // sigprocmask wrapper : block a single signal 199 static inline void signal_block( int sig ) { 200 sigset_t mask; 201 sigemptyset( &mask ); 202 sigaddset( &mask, sig ); 203 204 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 205 abortf( "internal error, pthread_sigmask" ); 206 } 207 } 208 209 // kill wrapper : signal a processor 210 static void preempt( processor * this ) 
{ 211 pthread_kill( this->kernel_thread, SIGUSR1 ); 212 } 213 214 // reserved for future use 215 static void timeout( thread_desc * this ) { 216 //TODO : implement waking threads 217 } 218 219 220 // Check if a CtxSwitch signal handler shoud defer 221 // If true : preemption is safe 222 // If false : preemption is unsafe and marked as pending 223 static inline bool preemption_ready() { 224 bool ready = disable_preempt_count == 0 && !preemption_in_progress; // Check if preemption is safe 225 this_processor->pending_preemption = !ready; // Adjust the pending flag accordingly 226 return ready; 227 } 228 229 //============================================================================================= 230 // Kernel Signal Startup/Shutdown logic 231 //============================================================================================= 232 233 // Startup routine to activate preemption 234 // Called from kernel_startup 235 void kernel_start_preemption() { 236 LIB_DEBUG_PRINT_SAFE("Kernel : Starting preemption\n"); 237 238 // Start with preemption disabled until ready 239 disable_preempt_count = 1; 240 241 // Initialize the event kernel 242 event_kernel = (event_kernel_t *)&storage_event_kernel; 243 event_kernel{}; 244 245 // Setup proper signal handlers 246 __kernel_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO ); // CtxSwitch handler 247 // __kernel_sigaction( SIGSEGV, sigHandler_segv , SA_SIGINFO ); // Failure handler 248 // __kernel_sigaction( SIGBUS , sigHandler_segv , SA_SIGINFO ); // Failure handler 249 250 signal_block( SIGALRM ); 251 252 pthread_create( &alarm_thread, NULL, alarm_loop, NULL ); 253 } 254 255 // Shutdown routine to deactivate preemption 256 // Called from kernel_shutdown 257 void kernel_stop_preemption() { 258 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopping\n"); 259 260 // Block all signals since we are already shutting down 261 sigset_t mask; 262 sigfillset( &mask ); 263 sigprocmask( SIG_BLOCK, &mask, NULL ); 264 265 // Notify the alarm thread of the shutdown 266 sigval val = { 1 }; 267 pthread_sigqueue( alarm_thread, SIGALRM, val ); 268 269 // Wait for the preemption thread to finish 270 pthread_join( alarm_thread, NULL ); 271 272 // Preemption is now fully stopped 273 274 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption stopped\n"); 275 } 276 277 // Raii ctor/dtor for the preemption_scope 278 // Used by thread to control when they want to receive preemption signals 91 279 void ?{}( preemption_scope * this, processor * proc ) { 92 (&this->alarm){ proc };280 (&this->alarm){ proc, zero_time, zero_time }; 93 281 this->proc = proc; 94 282 this->proc->preemption_alarm = &this->alarm; 95 update_preemption( this->proc, this->proc->preemption ); 283 284 update_preemption( this->proc, from_us(this->proc->cltr->preemption) ); 96 285 } 97 286 98 287 void ^?{}( preemption_scope * this ) { 99 update_preemption( this->proc, 0 ); 100 } 101 102 //============================================================================================= 103 // Kernel Signal logic 104 //============================================================================================= 105 106 static inline bool preemption_ready() { 107 return this_processor->disable_preempt_count == 0; 108 } 109 110 static inline void defer_ctxSwitch() { 111 this_processor->pending_preemption = true; 112 } 113 114 static inline void defer_alarm() { 115 systemProcessor->pending_alarm = true; 116 } 117 118 void sigHandler_ctxSwitch( __attribute__((unused)) int sig ) { 119 if( preemption_ready() ) { 120 
ScheduleInternal( this_processor->current_thread ); 121 } 122 else { 123 defer_ctxSwitch(); 124 } 125 } 126 127 void sigHandler_alarm( __attribute__((unused)) int sig ) { 128 if( try_lock( &systemProcessor->alarm_lock ) ) { 129 tick_preemption(); 130 unlock( &systemProcessor->alarm_lock ); 131 } 132 else { 133 defer_alarm(); 134 } 135 } 136 137 static void preempt( processor * this ) { 138 pthread_kill( this->kernel_thread, SIGUSR1 ); 139 } 140 141 static void timeout( thread_desc * this ) { 142 //TODO : implement waking threads 143 } 288 disable_interrupts(); 289 290 update_preemption( this->proc, zero_time ); 291 } 292 293 //============================================================================================= 294 // Kernel Signal Handlers 295 //============================================================================================= 296 297 // Context switch signal handler 298 // Receives SIGUSR1 signal and causes the current thread to yield 299 void sigHandler_ctxSwitch( __CFA_SIGPARMS__ ) { 300 LIB_DEBUG_DO( last_interrupt = (void *)(cxt->uc_mcontext.gregs[CFA_REG_IP]); ) 301 302 // Check if it is safe to preempt here 303 if( !preemption_ready() ) { return; } 304 305 preemption_in_progress = true; // Sync flag : prevent recursive calls to the signal handler 306 signal_unblock( SIGUSR1 ); // We are about to CtxSwitch out of the signal handler, let other handlers in 307 preemption_in_progress = false; // Clear the in progress flag 308 309 // Preemption can occur here 310 311 BlockInternal( (thread_desc*)this_thread ); // Do the actual CtxSwitch 312 } 313 314 // Main of the alarm thread 315 // Waits on SIGALRM and send SIGUSR1 to whom ever needs it 316 void * alarm_loop( __attribute__((unused)) void * args ) { 317 // Block sigalrms to control when they arrive 318 sigset_t mask; 319 sigemptyset( &mask ); 320 sigaddset( &mask, SIGALRM ); 321 322 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 323 abortf( "internal error, pthread_sigmask" ); 324 } 325 326 // Main loop 327 while( true ) { 328 // Wait for a sigalrm 329 siginfo_t info; 330 int sig = sigwaitinfo( &mask, &info ); 331 332 // If another signal arrived something went wrong 333 assertf(sig == SIGALRM, "Kernel Internal Error, sigwait: Unexpected signal %d (%d : %d)\n", sig, info.si_code, info.si_value.sival_int); 334 335 LIB_DEBUG_PRINT_SAFE("Kernel : Caught alarm from %d with %d\n", info.si_code, info.si_value.sival_int ); 336 // Switch on the code (a.k.a. 
the sender) to 337 switch( info.si_code ) 338 { 339 // Timers can apparently be marked as sent for the kernel 340 // In either case, tick preemption 341 case SI_TIMER: 342 case SI_KERNEL: 343 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread tick\n"); 344 lock( &event_kernel->lock DEBUG_CTX2 ); 345 tick_preemption(); 346 unlock( &event_kernel->lock ); 347 break; 348 // Signal was not sent by the kernel but by an other thread 349 case SI_QUEUE: 350 // For now, other thread only signal the alarm thread to shut it down 351 // If this needs to change use info.si_value and handle the case here 352 goto EXIT; 353 } 354 } 355 356 EXIT: 357 LIB_DEBUG_PRINT_SAFE("Kernel : Preemption thread stopping\n"); 358 return NULL; 359 } 360 361 // Sigaction wrapper : register an signal handler 362 static void __kernel_sigaction( int sig, void (*handler)(__CFA_SIGPARMS__), int flags ) { 363 struct sigaction act; 364 365 act.sa_sigaction = (void (*)(int, siginfo_t *, void *))handler; 366 act.sa_flags = flags; 367 368 if ( sigaction( sig, &act, NULL ) == -1 ) { 369 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, 370 " __kernel_sigaction( sig:%d, handler:%p, flags:%d ), problem installing signal handler, error(%d) %s.\n", 371 sig, handler, flags, errno, strerror( errno ) 372 ); 373 _exit( EXIT_FAILURE ); 374 } 375 } 376 377 // Sigaction wrapper : restore default handler 378 static void __kernel_sigdefault( int sig ) { 379 struct sigaction act; 380 381 act.sa_handler = SIG_DFL; 382 act.sa_flags = 0; 383 sigemptyset( &act.sa_mask ); 384 385 if ( sigaction( sig, &act, NULL ) == -1 ) { 386 LIB_DEBUG_PRINT_BUFFER_DECL( STDERR_FILENO, 387 " __kernel_sigdefault( sig:%d ), problem reseting signal handler, error(%d) %s.\n", 388 sig, errno, strerror( errno ) 389 ); 390 _exit( EXIT_FAILURE ); 391 } 392 } 393 394 //============================================================================================= 395 // Terminating Signals logic 396 //============================================================================================= 397 398 LIB_DEBUG_DO( 399 static void __kernel_backtrace( int start ) { 400 // skip first N stack frames 401 402 enum { Frames = 50 }; 403 void * array[Frames]; 404 int size = backtrace( array, Frames ); 405 char ** messages = backtrace_symbols( array, size ); 406 407 // find executable name 408 *index( messages[0], '(' ) = '\0'; 409 #ifdef __USE_STREAM__ 410 serr | "Stack back trace for:" | messages[0] | endl; 411 #else 412 fprintf( stderr, "Stack back trace for: %s\n", messages[0]); 413 #endif 414 415 // skip last 2 stack frames after main 416 for ( int i = start; i < size && messages != NULL; i += 1 ) { 417 char * name = NULL; 418 char * offset_begin = NULL; 419 char * offset_end = NULL; 420 421 for ( char *p = messages[i]; *p; ++p ) { 422 // find parantheses and +offset 423 if ( *p == '(' ) { 424 name = p; 425 } 426 else if ( *p == '+' ) { 427 offset_begin = p; 428 } 429 else if ( *p == ')' ) { 430 offset_end = p; 431 break; 432 } 433 } 434 435 // if line contains symbol print it 436 int frameNo = i - start; 437 if ( name && offset_begin && offset_end && name < offset_begin ) { 438 // delimit strings 439 *name++ = '\0'; 440 *offset_begin++ = '\0'; 441 *offset_end++ = '\0'; 442 443 #ifdef __USE_STREAM__ 444 serr | "(" | frameNo | ")" | messages[i] | ":" 445 | name | "+" | offset_begin | offset_end | endl; 446 #else 447 fprintf( stderr, "(%i) %s : %s + %s %s\n", frameNo, messages[i], name, offset_begin, offset_end); 448 #endif 449 } 450 // otherwise, print the whole line 451 else { 452 #ifdef 
__USE_STREAM__ 453 serr | "(" | frameNo | ")" | messages[i] | endl; 454 #else 455 fprintf( stderr, "(%i) %s\n", frameNo, messages[i] ); 456 #endif 457 } 458 } 459 460 free( messages ); 461 } 462 ) 463 464 // void sigHandler_segv( __CFA_SIGPARMS__ ) { 465 // LIB_DEBUG_DO( 466 // #ifdef __USE_STREAM__ 467 // serr | "*CFA runtime error* program cfa-cpp terminated with" 468 // | (sig == SIGSEGV ? "segment fault." : "bus error.") 469 // | endl; 470 // #else 471 // fprintf( stderr, "*CFA runtime error* program cfa-cpp terminated with %s\n", sig == SIGSEGV ? "segment fault." : "bus error." ); 472 // #endif 473 474 // // skip first 2 stack frames 475 // __kernel_backtrace( 1 ); 476 // ) 477 // exit( EXIT_FAILURE ); 478 // } 479 480 // void sigHandler_abort( __CFA_SIGPARMS__ ) { 481 // // skip first 6 stack frames 482 // LIB_DEBUG_DO( __kernel_backtrace( 6 ); ) 483 484 // // reset default signal handler 485 // __kernel_sigdefault( SIGABRT ); 486 487 // raise( SIGABRT ); 488 // } -
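The preemption rework above routes all timer signals through one dedicated pthread: SIGALRM is blocked before the processor threads exist, the alarm thread consumes it synchronously with sigwaitinfo, forwards preemption to a kernel thread as SIGUSR1, and treats an SI_QUEUE delivery (from pthread_sigqueue) as a shutdown request. The following is a standalone sketch of that pattern in plain C/POSIX; the single target thread and the alarm_loop name are simplifications for illustration, not the library's interface.

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>

static void * alarm_loop( void * arg ) {
	pthread_t target = *(pthread_t *)arg;        /* kernel thread to preempt */

	sigset_t mask;
	sigemptyset( &mask );
	sigaddset( &mask, SIGALRM );
	pthread_sigmask( SIG_BLOCK, &mask, NULL );   /* consume SIGALRM synchronously */

	for ( ;; ) {
		siginfo_t info;
		if ( sigwaitinfo( &mask, &info ) != SIGALRM ) continue;

		switch ( info.si_code ) {
		  case SI_TIMER:                         /* posted by the interval timer      */
		  case SI_KERNEL:
			pthread_kill( target, SIGUSR1 );     /* forward as a preemption request   */
			break;
		  case SI_QUEUE:                         /* posted by pthread_sigqueue: stop  */
			return NULL;
		}
	}
}

In the changeset itself the SIGALRM mask is installed in kernel_start_preemption before pthread_create, so every later thread inherits it and only the alarm thread can ever receive the timer signal; pthread_sigqueue is a GNU extension, hence the _GNU_SOURCE in the sketch.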
src/libcfa/concurrency/thread
rfea3faa rb826e6b 54 54 } 55 55 56 thread_desc * this_thread(void);56 extern volatile thread_local thread_desc * this_thread; 57 57 58 58 forall( dtype T | is_thread(T) ) -
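The header change above replaces the this_thread() accessor function with a volatile thread-local variable, so reading the current thread becomes a plain load instead of a call. A small C11 analogue of the two forms, with hypothetical names (this_thread_fn, this_thread_var) used only for contrast:

#include <threads.h>            /* C11: thread_local expands to _Thread_local */

struct thread_desc;             /* opaque for the sketch */

/* before: every lookup of the running thread went through a function */
struct thread_desc * this_thread_fn( void );

/* after: a per-kernel-thread pointer read directly; volatile because the
 * scheduler and the preemption signal handler update it asynchronously */
extern volatile thread_local struct thread_desc * this_thread_var;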
src/libcfa/concurrency/thread.c
rfea3faa rb826e6b 28 28 } 29 29 30 extern thread_local processor * this_processor;30 extern volatile thread_local processor * this_processor; 31 31 32 32 //----------------------------------------------------------------------------- … … 71 71 coroutine_desc* thrd_c = get_coroutine(this); 72 72 thread_desc* thrd_h = get_thread (this); 73 thrd_c->last = this_coroutine(); 74 this_processor->current_coroutine = thrd_c; 73 thrd_c->last = this_coroutine; 75 74 76 LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h);75 // LIB_DEBUG_PRINT_SAFE("Thread start : %p (t %p, c %p)\n", this, thrd_c, thrd_h); 77 76 77 disable_interrupts(); 78 78 create_stack(&thrd_c->stack, thrd_c->stack.size); 79 this_coroutine = thrd_c; 79 80 CtxStart(this, CtxInvokeThread); 81 assert( thrd_c->last->stack.context ); 80 82 CtxSwitch( thrd_c->last->stack.context, thrd_c->stack.context ); 81 83 82 84 ScheduleThread(thrd_h); 85 enable_interrupts( DEBUG_CTX ); 83 86 } 84 87 85 88 void yield( void ) { 86 ScheduleInternal( this_processor->current_thread );89 BlockInternal( (thread_desc *)this_thread ); 87 90 } 88 91 … … 95 98 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) { 96 99 // set state of current coroutine to inactive 97 src->state = Inactive;100 src->state = src->state == Halted ? Halted : Inactive; 98 101 dst->state = Active; 99 102 … … 103 106 // set new coroutine that the processor is executing 104 107 // and context switch to it 105 this_processor->current_coroutine = dst; 108 this_coroutine = dst; 109 assert( src->stack.context ); 106 110 CtxSwitch( src->stack.context, dst->stack.context ); 107 this_ processor->current_coroutine = src;111 this_coroutine = src; 108 112 109 113 // set state of new coroutine to active 110 dst->state = Inactive;114 dst->state = dst->state == Halted ? Halted : Inactive; 111 115 src->state = Active; 112 116 }
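ThreadCtxSwitch in the hunks above now preserves a Halted state instead of unconditionally writing Inactive, and it publishes the destination through the this_coroutine thread-local rather than through the processor before switching stacks. Below is a minimal sketch of that shape using ucontext in plain C; coroutine, co_switch and current_co are hypothetical names for illustration, and swapcontext stands in for the library's assembly CtxSwitch.

#include <ucontext.h>

enum co_state { Active, Inactive, Halted };

struct coroutine {
	ucontext_t    ctx;
	enum co_state state;
};

static _Thread_local struct coroutine * current_co;

static void co_switch( struct coroutine * src, struct coroutine * dst ) {
	src->state = (src->state == Halted) ? Halted : Inactive;
	dst->state = Active;

	current_co = dst;                        /* publish before leaving src's stack */
	swapcontext( &src->ctx, &dst->ctx );
	current_co = src;                        /* running on src's stack again       */

	dst->state = (dst->state == Halted) ? Halted : Inactive;
	src->state = Active;
}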