Changeset ff29f08 for src/libcfa/concurrency

- Timestamp: May 18, 2018, 2:09:21 PM (7 years ago)
- Branches: new-env, with_gc
- Children: 2472a19
- Parents: f6f0cca3 (diff), c7d8100c (diff)
  Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location: src/libcfa/concurrency
- Files: 11 edited
src/libcfa/concurrency/coroutine
rf6f0cca3 rff29f08 32 32 //----------------------------------------------------------------------------- 33 33 // Ctors and dtors 34 void ?{}(coStack_t & this); 35 void ?{}(coroutine_desc & this); 36 void ?{}(coroutine_desc & this, const char * name); 37 void ^?{}(coStack_t & this); 38 void ^?{}(coroutine_desc & this); 34 // void ?{}( coStack_t & this ); 35 // void ^?{}( coStack_t & this ); 36 37 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ); 38 void ^?{}( coroutine_desc & this ); 39 40 static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", NULL, 0 }; } 41 static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", NULL, stackSize }; } 42 static inline void ?{}( coroutine_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; } 43 static inline void ?{}( coroutine_desc & this, const char * name) { this{ name, NULL, 0 }; } 44 static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; } 39 45 40 46 //----------------------------------------------------------------------------- … … 66 72 // Suspend implementation inlined for performance 67 73 static inline void suspend() { 68 coroutine_desc * src = TL_GET( this_coroutine ); // optimization 74 // optimization : read TLS once and reuse it 75 // Safety note: this is preemption safe since if 76 // preemption occurs after this line, the pointer 77 // will also migrate which means this value will 78 // stay in syn with the TLS 79 coroutine_desc * src = TL_GET( this_coroutine ); 69 80 70 81 assertf( src->last != 0, … … 83 94 forall(dtype T | is_coroutine(T)) 84 95 static inline void resume(T & cor) { 85 coroutine_desc * src = TL_GET( this_coroutine ); // optimization 96 // optimization : read TLS once and reuse it 97 // Safety note: this is preemption safe since if 98 // preemption occurs after this line, the pointer 99 // will also migrate which means this value will 100 // stay in syn with the TLS 101 coroutine_desc * src = TL_GET( this_coroutine ); 86 102 coroutine_desc * dst = get_coroutine(cor); 87 103 … … 101 117 dst->last = src; 102 118 dst->starter = dst->starter ? dst->starter : src; 103 } // if119 } 104 120 105 121 // always done for performance testing … … 108 124 109 125 static inline void resume(coroutine_desc * dst) { 110 coroutine_desc * src = TL_GET( this_coroutine ); // optimization 126 // optimization : read TLS once and reuse it 127 // Safety note: this is preemption safe since if 128 // preemption occurs after this line, the pointer 129 // will also migrate which means this value will 130 // stay in syn with the TLS 131 coroutine_desc * src = TL_GET( this_coroutine ); 111 132 112 133 // not resuming self ? … … 119 140 // set last resumer 120 141 dst->last = src; 121 } // if142 } 122 143 123 144 // always done for performance testing -
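Note (illustrative): the constructor rework above funnels every coroutine_desc overload through the single ( name, storage, storageSize ) constructor, with NULL storage meaning "allocate for me" and size 0 meaning "use the default". A minimal usage sketch, assuming the usual CFA declaration-as-constructor-call syntax; the variable names and sizes are invented for the example:

    coroutine_desc anon;                                             // "Anonymous Coroutine", default-size runtime-allocated stack
    coroutine_desc sized  = { 128 * 1024 };                          // anonymous, 128 KiB stack request
    coroutine_desc named  = { "Worker", 64 * 1024 };                 // named, 64 KiB stack request
    static char backing[64 * 1024];
    coroutine_desc pinned = { "Pinned", backing, sizeof(backing) };  // caller-supplied storage, not freed by the runtime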
src/libcfa/concurrency/coroutine.c
rf6f0cca3 rff29f08 39 39 //----------------------------------------------------------------------------- 40 40 // Coroutine ctors and dtors 41 void ?{}(coStack_t& this) with( this ) { 42 size = 65000; // size of stack 43 storage = NULL; // pointer to stack 44 limit = NULL; // stack grows towards stack limit 45 base = NULL; // base of stack 46 context = NULL; // address of cfa_context_t 47 top = NULL; // address of top of storage 48 userStack = false; 49 } 50 51 void ?{}(coStack_t& this, size_t size) { 52 this{}; 53 this.size = size; 54 55 create_stack(&this, this.size); 56 } 57 58 void ?{}(coroutine_desc& this) { 59 this{ "Anonymous Coroutine" }; 60 } 61 62 void ?{}(coroutine_desc& this, const char * name) with( this ) { 63 this.name = name; 64 errno_ = 0; 65 state = Start; 66 starter = NULL; 67 last = NULL; 68 } 69 70 void ?{}(coroutine_desc& this, size_t size) { 71 this{}; 72 (this.stack){size}; 41 void ?{}( coStack_t & this, void * storage, size_t storageSize ) with( this ) { 42 size = storageSize == 0 ? 65000 : storageSize; // size of stack 43 this.storage = storage; // pointer to stack 44 limit = NULL; // stack grows towards stack limit 45 base = NULL; // base of stack 46 context = NULL; // address of cfa_context_t 47 top = NULL; // address of top of storage 48 userStack = storage != NULL; 73 49 } 74 50 75 51 void ^?{}(coStack_t & this) { 76 if ( ! this.userStack && this.storage ) { 77 __cfaabi_dbg_debug_do( 78 if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) { 79 abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) ); 80 } 81 ); 82 free( this.storage ); 83 } 52 if ( ! this.userStack && this.storage ) { 53 __cfaabi_dbg_debug_do( 54 if ( mprotect( this.storage, pageSize, PROT_READ | PROT_WRITE ) == -1 ) { 55 abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) ); 56 } 57 ); 58 free( this.storage ); 59 } 60 } 61 62 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) { 63 (this.stack){storage, storageSize}; 64 this.name = name; 65 errno_ = 0; 66 state = Start; 67 starter = NULL; 68 last = NULL; 84 69 } 85 70 … … 90 75 forall(dtype T | is_coroutine(T)) 91 76 void prime(T& cor) { 92 93 77 coroutine_desc* this = get_coroutine(cor); 78 assert(this->state == Start); 94 79 95 96 80 this->state = Primed; 81 resume(cor); 97 82 } 98 83 99 84 // Wrapper for co 100 85 void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) { 101 verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate ); 102 disable_interrupts(); 86 // Safety note : This could cause some false positives due to preemption 87 verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate ); 88 disable_interrupts(); 103 89 104 105 90 // set state of current coroutine to inactive 91 src->state = src->state == Halted ? 
Halted : Inactive; 106 92 107 108 TL_SET( this_coroutine, dst );93 // set new coroutine that task is executing 94 kernelTLS.this_coroutine = dst; 109 95 110 111 112 113 96 // context switch to specified coroutine 97 assert( src->stack.context ); 98 CtxSwitch( src->stack.context, dst->stack.context ); 99 // when CtxSwitch returns we are back in the src coroutine 114 100 115 116 101 // set state of new coroutine to active 102 src->state = Active; 117 103 118 enable_interrupts( __cfaabi_dbg_ctx ); 119 verify( TL_GET( preemption_state ).enabled || TL_GET( this_processor )->do_terminate ); 104 enable_interrupts( __cfaabi_dbg_ctx ); 105 // Safety note : This could cause some false positives due to preemption 106 verify( TL_GET( preemption_state.enabled ) || TL_GET( this_processor )->do_terminate ); 120 107 } //ctxSwitchDirect 121 108 122 109 void create_stack( coStack_t* this, unsigned int storageSize ) with( *this ) { 123 124 110 //TEMP HACK do this on proper kernel startup 111 if(pageSize == 0ul) pageSize = sysconf( _SC_PAGESIZE ); 125 112 126 113 size_t cxtSize = libCeiling( sizeof(machine_context_t), 8 ); // minimum alignment 127 114 128 if ( (intptr_t)storage == 0 ) { 129 userStack = false; 130 size = libCeiling( storageSize, 16 ); 131 // use malloc/memalign because "new" raises an exception for out-of-memory 115 if ( !storage ) { 116 __cfaabi_dbg_print_safe("Kernel : Creating stack of size %zu for stack obj %p\n", cxtSize + size + 8, this); 132 117 133 // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment 134 __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ));135 __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) ); 118 userStack = false; 119 size = libCeiling( storageSize, 16 ); 120 // use malloc/memalign because "new" raises an exception for out-of-memory 136 121 137 __cfaabi_dbg_debug_do( 138 if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) { 139 abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) ); 140 } // if 141 ); 122 // assume malloc has 8 byte alignment so add 8 to allow rounding up to 16 byte alignment 123 __cfaabi_dbg_debug_do( storage = memalign( pageSize, cxtSize + size + pageSize ) ); 124 __cfaabi_dbg_no_debug_do( storage = malloc( cxtSize + size + 8 ) ); 142 125 143 if ( (intptr_t)storage == 0 ) { 144 abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size ); 145 } // if 126 __cfaabi_dbg_debug_do( 127 if ( mprotect( storage, pageSize, PROT_NONE ) == -1 ) { 128 abort( "(uMachContext &)%p.createContext() : internal error, mprotect failure, error(%d) %s.", this, (int)errno, strerror( (int)errno ) ); 129 } // if 130 ); 146 131 147 __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize ); 148 __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( (unsigned long)storage, 16 ) ); // minimum alignment 132 if ( (intptr_t)storage == 0 ) { 133 abort( "Attempt to allocate %zd bytes of storage for coroutine or task execution-state but insufficient memory available.", size ); 134 } // if 149 135 150 } else { 151 assertf( ((size_t)storage & (libAlign() - 1)) != 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() ); 152 userStack = true; 153 size = storageSize - cxtSize; 136 __cfaabi_dbg_debug_do( limit = (char *)storage + pageSize ); 137 __cfaabi_dbg_no_debug_do( limit = (char *)libCeiling( 
(unsigned long)storage, 16 ) ); // minimum alignment 154 138 155 if ( size % 16 != 0u ) size -= 8; 139 } else { 140 __cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%u bytes)\n", this, storage, storageSize); 156 141 157 limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment 158 } // if 159 assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize );142 assertf( ((size_t)storage & (libAlign() - 1)) == 0ul, "Stack storage %p for task/coroutine must be aligned on %d byte boundary.", storage, (int)libAlign() ); 143 userStack = true; 144 size = storageSize - cxtSize; 160 145 161 base = (char *)limit + size; 162 context = base; 163 top = (char *)context + cxtSize; 146 if ( size % 16 != 0u ) size -= 8; 147 148 limit = (char *)libCeiling( (unsigned long)storage, 16 ); // minimum alignment 149 } // if 150 assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %d bytes for a stack.", size, MinStackSize ); 151 152 base = (char *)limit + size; 153 context = base; 154 top = (char *)context + cxtSize; 164 155 } 165 156 … … 167 158 // is not inline (We can't inline Cforall in C) 168 159 extern "C" { 169 170 171 160 void __suspend_internal(void) { 161 suspend(); 162 } 172 163 173 174 164 void __leave_coroutine(void) { 165 coroutine_desc * src = TL_GET( this_coroutine ); // optimization 175 166 176 177 178 179 180 181 182 183 167 assertf( src->starter != 0, 168 "Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n" 169 "Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.", 170 src->name, src ); 171 assertf( src->starter->state != Halted, 172 "Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n" 173 "Possible cause is terminated coroutine's main routine has already returned.", 174 src->name, src, src->starter->name, src->starter ); 184 175 185 186 176 CoroutineCtxSwitch( src, src->starter ); 177 } 187 178 } 188 179 -
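Note (assumption): create_stack() rounds both the context size and the stack size with libCeiling(), whose definition is not part of this changeset. The sketch below shows the presumed behaviour, a power-of-two round-up, purely to make the alignment arithmetic above easier to follow:

    // presumed behaviour of libCeiling( n, align ): round n up to the next multiple of align
    // (align is a power of two everywhere it is used in create_stack)
    static inline unsigned long libCeiling( unsigned long n, unsigned long align ) {
    	return ( n + align - 1 ) & ~( align - 1 );
    }
    // e.g. libCeiling( sizeof(machine_context_t), 8 ) pads the saved context to an 8-byte boundary,
    // and libCeiling( storageSize, 16 ) rounds the requested stack size up to a 16-byte multiple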
src/libcfa/concurrency/invoke.c
rf6f0cca3 → rff29f08

 	// Fetch the thread handle from the user defined thread structure
 	struct thread_desc* thrd = get_thread( this );
+	thrd->self_cor.last = NULL;
 
 	// Officially start the thread by enabling preemption
src/libcfa/concurrency/invoke.h
rf6f0cca3 → rff29f08

 	#include "bits/locks.h"
 
-	#define TL_GET( member ) kernelThreadData.member
-	#define TL_SET( member, value ) kernelThreadData.member = value;
+	#define TL_GET( member ) kernelTLS.member
+	#define TL_SET( member, value ) kernelTLS.member = value;
 
 	#ifdef __cforall
…
 			volatile bool in_progress;
 		} preemption_state;
-	} kernelThreadData;
+	} kernelTLS;
 	}
 
 	static inline struct coroutine_desc * volatile active_coroutine() { return TL_GET( this_coroutine ); }
-	static inline struct thread_desc * volatile active_thread() { return TL_GET( this_thread ); }
-	static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); }
+	static inline struct thread_desc * volatile active_thread () { return TL_GET( this_thread ); }
+	static inline struct processor * volatile active_processor() { return TL_GET( this_processor ); } // UNSAFE
 	#endif
…
 		struct monitor_desc * self_mon_p;
 
+		// pointer to the cluster on which the thread is running
+		struct cluster * curr_cluster;
+
 		// monitors currently held by this thread
 		struct __monitor_group_t monitors;
…
 		struct thread_desc * next;
 
-		__cfaabi_dbg_debug_do(
-			// intrusive link field for debugging
-			struct thread_desc * dbg_next;
-			struct thread_desc * dbg_prev;
-		)
+		struct {
+			struct thread_desc * next;
+			struct thread_desc * prev;
+		} node;
 	};
…
 	static inline thread_desc * & get_next( thread_desc & this ) {
 		return this.next;
 	}
+
+	static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
+		return this.node.[next, prev];
+	}
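Note (illustrative): TL_GET is a purely textual macro, so the two call spellings that coexist in this changeset expand to the same thread-local access; the rename from kernelThreadData to kernelTLS does not change that:

    // with:  #define TL_GET( member ) kernelTLS.member
    TL_GET( preemption_state.enabled )   // expands to kernelTLS.preemption_state.enabled
    TL_GET( preemption_state ).enabled   // expands to kernelTLS.preemption_state.enabled as well
    TL_SET( this_coroutine, dst )        // expands to kernelTLS.this_coroutine = dst;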
src/libcfa/concurrency/kernel
rf6f0cca3 rff29f08 40 40 41 41 //----------------------------------------------------------------------------- 42 // Cluster 43 struct cluster { 44 // Ready queue locks 45 __spinlock_t ready_queue_lock; 42 // Processor 43 extern struct cluster * mainCluster; 46 44 47 // Ready queue for threads48 __queue_t(thread_desc) ready_queue;49 50 // Preemption rate on this cluster51 Duration preemption_rate;52 };53 54 extern Duration default_preemption();55 56 void ?{} (cluster & this);57 void ^?{}(cluster & this);58 59 //-----------------------------------------------------------------------------60 // Processor61 45 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule }; 62 46 … … 90 74 91 75 // Cluster from which to get threads 92 cluster * cltr; 76 struct cluster * cltr; 77 78 // Name of the processor 79 const char * name; 93 80 94 81 // Handle to pthreads … … 113 100 bool pending_preemption; 114 101 102 // Idle lock 103 104 // Link lists fields 105 struct { 106 struct processor * next; 107 struct processor * prev; 108 } node; 109 115 110 #ifdef __CFA_DEBUG__ 116 111 // Last function to enable preemption on this processor … … 119 114 }; 120 115 121 void ?{}(processor & this); 122 void ?{}(processor & this, cluster * cltr); 116 void ?{}(processor & this, const char * name, struct cluster & cltr); 123 117 void ^?{}(processor & this); 118 119 static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; } 120 static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; } 121 static inline void ?{}(processor & this, const char * name) { this{name, *mainCluster }; } 122 123 static inline [processor *&, processor *& ] __get( processor & this ) { 124 return this.node.[next, prev]; 125 } 126 127 //----------------------------------------------------------------------------- 128 // Cluster 129 struct cluster { 130 // Ready queue locks 131 __spinlock_t ready_queue_lock; 132 133 // Ready queue for threads 134 __queue_t(thread_desc) ready_queue; 135 136 // Name of the cluster 137 const char * name; 138 139 // Preemption rate on this cluster 140 Duration preemption_rate; 141 142 // List of processors 143 __spinlock_t proc_list_lock; 144 __dllist_t(struct processor) procs; 145 __dllist_t(struct processor) idles; 146 147 // Link lists fields 148 struct { 149 cluster * next; 150 cluster * prev; 151 } node; 152 }; 153 extern Duration default_preemption(); 154 155 void ?{} (cluster & this, const char * name, Duration preemption_rate); 156 void ^?{}(cluster & this); 157 158 static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption()}; } 159 static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; } 160 static inline void ?{} (cluster & this, const char * name) { this{name, default_preemption()}; } 161 162 static inline [cluster *&, cluster *& ] __get( cluster & this ) { 163 return this.node.[next, prev]; 164 } 124 165 125 166 // Local Variables: // -
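Note (illustrative): processors and clusters now follow the same pattern as coroutines, one primary named constructor plus inline delegating overloads, and both carry intrusive node links so they can sit on the new __dllist_t lists. A hypothetical construction sequence (the names are invented for the example):

    cluster   io_cluster = { "IO Cluster" };                 // preemption rate defaults to default_preemption()
    processor io_proc    = { "IO Processor", io_cluster };   // serves ready threads of io_cluster
    processor anon_proc;                                      // "Anonymous Processor" attached to *mainCluster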
src/libcfa/concurrency/kernel.c
rf6f0cca3 rff29f08 42 42 KERNEL_STORAGE(cluster, mainCluster); 43 43 KERNEL_STORAGE(processor, mainProcessor); 44 KERNEL_STORAGE(processorCtx_t, mainProcessorCtx);45 44 KERNEL_STORAGE(thread_desc, mainThread); 46 45 KERNEL_STORAGE(machine_context_t, mainThreadCtx); 47 46 48 cluster *mainCluster;49 processor *mainProcessor;47 cluster * mainCluster; 48 processor * mainProcessor; 50 49 thread_desc * mainThread; 50 51 struct { __dllist_t(thread_desc) list; __spinlock_t lock; } global_threads ; 52 struct { __dllist_t(cluster ) list; __spinlock_t lock; } global_clusters; 51 53 52 54 //----------------------------------------------------------------------------- … … 57 59 // volatile thread_local unsigned short disable_preempt_count = 1; 58 60 59 thread_local struct KernelThreadData kernelT hreadData= {61 thread_local struct KernelThreadData kernelTLS = { 60 62 NULL, 61 63 NULL, … … 65 67 66 68 //----------------------------------------------------------------------------- 67 // Main thread construction69 // Struct to steal stack 68 70 struct current_stack_info_t { 69 71 machine_context_t ctx; … … 90 92 } 91 93 94 //----------------------------------------------------------------------------- 95 // Main thread construction 92 96 void ?{}( coStack_t & this, current_stack_info_t * info) with( this ) { 93 97 size = info->size; … … 111 115 self_cor{ info }; 112 116 curr_cor = &self_cor; 117 curr_cluster = mainCluster; 113 118 self_mon.owner = &this; 114 119 self_mon.recursion = 1; 115 120 self_mon_p = &self_mon; 116 121 next = NULL; 117 __cfaabi_dbg_debug_do( 118 dbg_next = NULL; 119 dbg_prev = NULL; 120 __cfaabi_dbg_thread_register(&this); 121 ) 122 123 node.next = NULL; 124 node.prev = NULL; 125 doregister(this); 122 126 123 127 monitors{ &self_mon_p, 1, (fptr_t)0 }; … … 126 130 //----------------------------------------------------------------------------- 127 131 // Processor coroutine 128 void ?{}(processorCtx_t & this) {} 129 130 // Construct the processor context of the main processor 131 void ?{}(processorCtx_t & this, processor * proc) { 132 (this.__cor){ "Processor" }; 133 this.__cor.starter = NULL; 134 this.proc = proc; 132 void ?{}(processorCtx_t & this) { 133 135 134 } 136 135 … … 141 140 } 142 141 143 void ?{}(processor & this) { 144 this{ mainCluster }; 145 } 146 147 void ?{}(processor & this, cluster * cltr) with( this ) { 148 this.cltr = cltr; 142 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) { 143 this.name = name; 144 this.cltr = &cltr; 149 145 terminated{ 0 }; 150 146 do_terminate = false; … … 156 152 } 157 153 158 void ?{}(processor & this, cluster * cltr, processorCtx_t & runner) with( this ) {159 this.cltr = cltr;160 terminated{ 0 };161 do_terminate = false;162 preemption_alarm = NULL;163 pending_preemption = false;164 kernel_thread = pthread_self();165 runner.proc = &this;166 167 __cfaabi_dbg_print_safe("Kernel : constructing main processor context %p\n", &runner);168 runner{ &this };169 }170 171 154 void ^?{}(processor & this) with( this ){ 172 155 if( ! 
do_terminate ) { … … 174 157 terminate(&this); 175 158 verify(this.do_terminate); 176 verify( TL_GET( this_processor )!= &this);159 verify( kernelTLS.this_processor != &this); 177 160 P( terminated ); 178 verify( TL_GET( this_processor )!= &this);161 verify( kernelTLS.this_processor != &this); 179 162 pthread_join( kernel_thread, NULL ); 180 163 } 181 164 } 182 165 183 void ?{}(cluster & this) with( this ) { 166 void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) { 167 this.name = name; 168 this.preemption_rate = preemption_rate; 184 169 ready_queue{}; 185 170 ready_queue_lock{}; 186 171 187 preemption_rate = default_preemption(); 172 procs{ __get }; 173 idles{ __get }; 174 175 doregister(this); 188 176 } 189 177 190 178 void ^?{}(cluster & this) { 191 179 unregister(this); 192 180 } 193 181 … … 202 190 __cfaabi_dbg_print_safe("Kernel : core %p starting\n", this); 203 191 192 doregister(this->cltr, this); 193 204 194 { 205 195 // Setup preemption data … … 215 205 if(readyThread) 216 206 { 217 verify( ! TL_GET( preemption_state ).enabled );207 verify( ! kernelTLS.preemption_state.enabled ); 218 208 219 209 runThread(this, readyThread); 220 210 221 verify( ! TL_GET( preemption_state ).enabled );211 verify( ! kernelTLS.preemption_state.enabled ); 222 212 223 213 //Some actions need to be taken from the kernel … … 235 225 } 236 226 227 unregister(this->cltr, this); 228 237 229 V( this->terminated ); 238 230 … … 240 232 } 241 233 234 // KERNEL ONLY 242 235 // runThread runs a thread by context switching 243 236 // from the processor coroutine to the target thread … … 247 240 coroutine_desc * thrd_cor = dst->curr_cor; 248 241 249 // Reset the terminating actions here242 // Reset the terminating actions here 250 243 this->finish.action_code = No_Action; 251 244 252 // Update global state253 TL_SET( this_thread, dst );245 // Update global state 246 kernelTLS.this_thread = dst; 254 247 255 248 // Context Switch to the thread … … 258 251 } 259 252 253 // KERNEL_ONLY 260 254 void returnToKernel() { 261 coroutine_desc * proc_cor = get_coroutine( TL_GET( this_processor )->runner);262 coroutine_desc * thrd_cor = TL_GET( this_thread )->curr_cor = TL_GET( this_coroutine );255 coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 256 coroutine_desc * thrd_cor = kernelTLS.this_thread->curr_cor = kernelTLS.this_coroutine; 263 257 ThreadCtxSwitch(thrd_cor, proc_cor); 264 258 } 265 259 260 // KERNEL_ONLY 266 261 // Once a thread has finished running, some of 267 262 // its final actions must be executed from the kernel 268 263 void finishRunning(processor * this) with( this->finish ) { 269 264 if( action_code == Release ) { 270 verify( ! TL_GET( preemption_state ).enabled );265 verify( ! kernelTLS.preemption_state.enabled ); 271 266 unlock( *lock ); 272 267 } … … 275 270 } 276 271 else if( action_code == Release_Schedule ) { 277 verify( ! TL_GET( preemption_state ).enabled );272 verify( ! kernelTLS.preemption_state.enabled ); 278 273 unlock( *lock ); 279 274 ScheduleThread( thrd ); 280 275 } 281 276 else if( action_code == Release_Multi ) { 282 verify( ! TL_GET( preemption_state ).enabled );277 verify( ! 
kernelTLS.preemption_state.enabled ); 283 278 for(int i = 0; i < lock_count; i++) { 284 279 unlock( *locks[i] ); … … 304 299 } 305 300 301 // KERNEL_ONLY 306 302 // Context invoker for processors 307 303 // This is the entry point for processors (kernel threads) … … 309 305 void * CtxInvokeProcessor(void * arg) { 310 306 processor * proc = (processor *) arg; 311 TL_SET( this_processor, proc ); 312 TL_SET( this_coroutine, NULL ); 313 TL_SET( this_thread, NULL ); 314 TL_GET( preemption_state ).enabled = false; 315 TL_GET( preemption_state ).disable_count = 1; 307 kernelTLS.this_processor = proc; 308 kernelTLS.this_coroutine = NULL; 309 kernelTLS.this_thread = NULL; 310 kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; 316 311 // SKULLDUGGERY: We want to create a context for the processor coroutine 317 312 // which is needed for the 2-step context switch. However, there is no reason … … 325 320 326 321 //Set global state 327 TL_SET( this_coroutine, get_coroutine(proc->runner));328 TL_SET( this_thread, NULL );322 kernelTLS.this_coroutine = get_coroutine(proc->runner); 323 kernelTLS.this_thread = NULL; 329 324 330 325 //We now have a proper context from which to schedule threads … … 353 348 } 354 349 350 // KERNEL_ONLY 355 351 void kernel_first_resume(processor * this) { 356 coroutine_desc * src = TL_GET( this_coroutine );352 coroutine_desc * src = kernelTLS.this_coroutine; 357 353 coroutine_desc * dst = get_coroutine(this->runner); 358 354 359 verify( ! TL_GET( preemption_state ).enabled );355 verify( ! kernelTLS.preemption_state.enabled ); 360 356 361 357 create_stack(&dst->stack, dst->stack.size); 362 358 CtxStart(&this->runner, CtxInvokeCoroutine); 363 359 364 verify( ! TL_GET( preemption_state ).enabled );360 verify( ! kernelTLS.preemption_state.enabled ); 365 361 366 362 dst->last = src; … … 371 367 372 368 // set new coroutine that task is executing 373 TL_SET( this_coroutine, dst );369 kernelTLS.this_coroutine = dst; 374 370 375 371 // SKULLDUGGERY normally interrupts are enable before leaving a coroutine ctxswitch. … … 388 384 src->state = Active; 389 385 390 verify( ! TL_GET( preemption_state ).enabled );386 verify( ! kernelTLS.preemption_state.enabled ); 391 387 } 392 388 393 389 //----------------------------------------------------------------------------- 394 390 // Scheduler routines 391 392 // KERNEL ONLY 395 393 void ScheduleThread( thread_desc * thrd ) { 396 // if( ! thrd ) return;397 394 verify( thrd ); 398 395 verify( thrd->self_cor.state != Halted ); 399 396 400 verify( ! TL_GET( preemption_state ).enabled );397 verify( ! kernelTLS.preemption_state.enabled ); 401 398 402 399 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 403 400 404 with( * TL_GET( this_processor )->cltr ) {401 with( *thrd->curr_cluster ) { 405 402 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 406 403 append( ready_queue, thrd ); … … 408 405 } 409 406 410 verify( ! TL_GET( preemption_state ).enabled ); 411 } 412 407 verify( ! kernelTLS.preemption_state.enabled ); 408 } 409 410 // KERNEL ONLY 413 411 thread_desc * nextThread(cluster * this) with( *this ) { 414 verify( ! TL_GET( preemption_state ).enabled );412 verify( ! kernelTLS.preemption_state.enabled ); 415 413 lock( ready_queue_lock __cfaabi_dbg_ctx2 ); 416 414 thread_desc * head = pop_head( ready_queue ); 417 415 unlock( ready_queue_lock ); 418 verify( ! TL_GET( preemption_state ).enabled );416 verify( ! 
kernelTLS.preemption_state.enabled ); 419 417 return head; 420 418 } … … 422 420 void BlockInternal() { 423 421 disable_interrupts(); 424 verify( ! TL_GET( preemption_state ).enabled );422 verify( ! kernelTLS.preemption_state.enabled ); 425 423 returnToKernel(); 426 verify( ! TL_GET( preemption_state ).enabled );424 verify( ! kernelTLS.preemption_state.enabled ); 427 425 enable_interrupts( __cfaabi_dbg_ctx ); 428 426 } … … 430 428 void BlockInternal( __spinlock_t * lock ) { 431 429 disable_interrupts(); 432 TL_GET( this_processor )->finish.action_code = Release; 433 TL_GET( this_processor )->finish.lock = lock; 434 435 verify( ! TL_GET( preemption_state ).enabled ); 430 with( *kernelTLS.this_processor ) { 431 finish.action_code = Release; 432 finish.lock = lock; 433 } 434 435 verify( ! kernelTLS.preemption_state.enabled ); 436 436 returnToKernel(); 437 verify( ! TL_GET( preemption_state ).enabled );437 verify( ! kernelTLS.preemption_state.enabled ); 438 438 439 439 enable_interrupts( __cfaabi_dbg_ctx ); … … 442 442 void BlockInternal( thread_desc * thrd ) { 443 443 disable_interrupts(); 444 TL_GET( this_processor )->finish.action_code = Schedule; 445 TL_GET( this_processor )->finish.thrd = thrd; 446 447 verify( ! TL_GET( preemption_state ).enabled ); 444 with( * kernelTLS.this_processor ) { 445 finish.action_code = Schedule; 446 finish.thrd = thrd; 447 } 448 449 verify( ! kernelTLS.preemption_state.enabled ); 448 450 returnToKernel(); 449 verify( ! TL_GET( preemption_state ).enabled );451 verify( ! kernelTLS.preemption_state.enabled ); 450 452 451 453 enable_interrupts( __cfaabi_dbg_ctx ); … … 455 457 assert(thrd); 456 458 disable_interrupts(); 457 TL_GET( this_processor )->finish.action_code = Release_Schedule; 458 TL_GET( this_processor )->finish.lock = lock; 459 TL_GET( this_processor )->finish.thrd = thrd; 460 461 verify( ! TL_GET( preemption_state ).enabled ); 459 with( * kernelTLS.this_processor ) { 460 finish.action_code = Release_Schedule; 461 finish.lock = lock; 462 finish.thrd = thrd; 463 } 464 465 verify( ! kernelTLS.preemption_state.enabled ); 462 466 returnToKernel(); 463 verify( ! TL_GET( preemption_state ).enabled );467 verify( ! kernelTLS.preemption_state.enabled ); 464 468 465 469 enable_interrupts( __cfaabi_dbg_ctx ); … … 468 472 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 469 473 disable_interrupts(); 470 TL_GET( this_processor )->finish.action_code = Release_Multi; 471 TL_GET( this_processor )->finish.locks = locks; 472 TL_GET( this_processor )->finish.lock_count = count; 473 474 verify( ! TL_GET( preemption_state ).enabled ); 474 with( * kernelTLS.this_processor ) { 475 finish.action_code = Release_Multi; 476 finish.locks = locks; 477 finish.lock_count = count; 478 } 479 480 verify( ! kernelTLS.preemption_state.enabled ); 475 481 returnToKernel(); 476 verify( ! TL_GET( preemption_state ).enabled );482 verify( ! kernelTLS.preemption_state.enabled ); 477 483 478 484 enable_interrupts( __cfaabi_dbg_ctx ); … … 481 487 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 482 488 disable_interrupts(); 483 TL_GET( this_processor )->finish.action_code = Release_Multi_Schedule; 484 TL_GET( this_processor )->finish.locks = locks; 485 TL_GET( this_processor )->finish.lock_count = lock_count; 486 TL_GET( this_processor )->finish.thrds = thrds; 487 TL_GET( this_processor )->finish.thrd_count = thrd_count; 488 489 verify( ! 
TL_GET( preemption_state ).enabled ); 489 with( *kernelTLS.this_processor ) { 490 finish.action_code = Release_Multi_Schedule; 491 finish.locks = locks; 492 finish.lock_count = lock_count; 493 finish.thrds = thrds; 494 finish.thrd_count = thrd_count; 495 } 496 497 verify( ! kernelTLS.preemption_state.enabled ); 490 498 returnToKernel(); 491 verify( ! TL_GET( preemption_state ).enabled );499 verify( ! kernelTLS.preemption_state.enabled ); 492 500 493 501 enable_interrupts( __cfaabi_dbg_ctx ); 494 502 } 495 503 504 // KERNEL ONLY 496 505 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 497 verify( ! TL_GET( preemption_state ).enabled ); 498 TL_GET( this_processor )->finish.action_code = thrd ? Release_Schedule : Release; 499 TL_GET( this_processor )->finish.lock = lock; 500 TL_GET( this_processor )->finish.thrd = thrd; 506 verify( ! kernelTLS.preemption_state.enabled ); 507 with( * kernelTLS.this_processor ) { 508 finish.action_code = thrd ? Release_Schedule : Release; 509 finish.lock = lock; 510 finish.thrd = thrd; 511 } 501 512 502 513 returnToKernel(); … … 509 520 // Kernel boot procedures 510 521 void kernel_startup(void) { 511 verify( ! TL_GET( preemption_state ).enabled );522 verify( ! kernelTLS.preemption_state.enabled ); 512 523 __cfaabi_dbg_print_safe("Kernel : Starting\n"); 524 525 global_threads. list{ __get }; 526 global_threads. lock{}; 527 global_clusters.list{ __get }; 528 global_clusters.lock{}; 529 530 // Initialize the main cluster 531 mainCluster = (cluster *)&storage_mainCluster; 532 (*mainCluster){"Main Cluster"}; 533 534 __cfaabi_dbg_print_safe("Kernel : Main cluster ready\n"); 513 535 514 536 // Start by initializing the main thread … … 521 543 __cfaabi_dbg_print_safe("Kernel : Main thread ready\n"); 522 544 523 // Initialize the main cluster 524 mainCluster = (cluster *)&storage_mainCluster; 525 (*mainCluster){}; 526 527 __cfaabi_dbg_print_safe("Kernel : main cluster ready\n"); 545 546 547 // Construct the processor context of the main processor 548 void ?{}(processorCtx_t & this, processor * proc) { 549 (this.__cor){ "Processor" }; 550 this.__cor.starter = NULL; 551 this.proc = proc; 552 } 553 554 void ?{}(processor & this) with( this ) { 555 name = "Main Processor"; 556 cltr = mainCluster; 557 terminated{ 0 }; 558 do_terminate = false; 559 preemption_alarm = NULL; 560 pending_preemption = false; 561 kernel_thread = pthread_self(); 562 563 runner{ &this }; 564 __cfaabi_dbg_print_safe("Kernel : constructed main processor context %p\n", &runner); 565 } 528 566 529 567 // Initialize the main processor and the main processor ctx 530 568 // (the coroutine that contains the processing control flow) 531 569 mainProcessor = (processor *)&storage_mainProcessor; 532 (*mainProcessor){ mainCluster, *(processorCtx_t *)&storage_mainProcessorCtx};570 (*mainProcessor){}; 533 571 534 572 //initialize the global state variables 535 TL_SET( this_processor, mainProcessor );536 TL_SET( this_thread, mainThread );537 TL_SET( this_coroutine, &mainThread->self_cor );573 kernelTLS.this_processor = mainProcessor; 574 kernelTLS.this_thread = mainThread; 575 kernelTLS.this_coroutine = &mainThread->self_cor; 538 576 539 577 // Enable preemption … … 547 585 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that 548 586 // mainThread is on the ready queue when this call is made. 
549 kernel_first_resume( TL_GET( this_processor ));587 kernel_first_resume( kernelTLS.this_processor ); 550 588 551 589 … … 554 592 __cfaabi_dbg_print_safe("Kernel : Started\n--------------------------------------------------\n\n"); 555 593 556 verify( ! TL_GET( preemption_state ).enabled );594 verify( ! kernelTLS.preemption_state.enabled ); 557 595 enable_interrupts( __cfaabi_dbg_ctx ); 558 verify( TL_GET( preemption_state ).enabled);596 verify( TL_GET( preemption_state.enabled ) ); 559 597 } 560 598 … … 562 600 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n"); 563 601 564 verify( TL_GET( preemption_state ).enabled);602 verify( TL_GET( preemption_state.enabled ) ); 565 603 disable_interrupts(); 566 verify( ! TL_GET( preemption_state ).enabled );604 verify( ! kernelTLS.preemption_state.enabled ); 567 605 568 606 // SKULLDUGGERY: Notify the mainProcessor it needs to terminates. … … 590 628 591 629 //============================================================================================= 630 // Kernel Quiescing 631 //============================================================================================= 632 633 // void halt(processor * this) with( this ) { 634 // pthread_mutex_lock( &idle.lock ); 635 636 637 638 // // SKULLDUGGERY: Even if spurious wake-up is a thing 639 // // spuriously waking up a kernel thread is not a big deal 640 // // if it is very rare. 641 // pthread_cond_wait( &idle.cond, &idle.lock); 642 // pthread_mutex_unlock( &idle.lock ); 643 // } 644 645 // void wake(processor * this) with( this ) { 646 // pthread_mutex_lock (&idle.lock); 647 // pthread_cond_signal (&idle.cond); 648 // pthread_mutex_unlock(&idle.lock); 649 // } 650 651 //============================================================================================= 592 652 // Unexpected Terminating logic 593 653 //============================================================================================= … … 595 655 596 656 static __spinlock_t kernel_abort_lock; 597 static __spinlock_t kernel_debug_lock;598 657 static bool kernel_abort_called = false; 599 658 600 void * kernel_abort 659 void * kernel_abort(void) __attribute__ ((__nothrow__)) { 601 660 // abort cannot be recursively entered by the same or different processors because all signal handlers return when 602 661 // the globalAbort flag is true. … … 604 663 605 664 // first task to abort ? 606 if ( ! kernel_abort_called ) { // not first task to abort ? 665 if ( kernel_abort_called ) { // not first task to abort ? 
666 unlock( kernel_abort_lock ); 667 668 sigset_t mask; 669 sigemptyset( &mask ); 670 sigaddset( &mask, SIGALRM ); // block SIGALRM signals 671 sigsuspend( &mask ); // block the processor to prevent further damage during abort 672 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 673 } 674 else { 607 675 kernel_abort_called = true; 608 676 unlock( kernel_abort_lock ); 609 677 } 610 else { 611 unlock( kernel_abort_lock ); 612 613 sigset_t mask; 614 sigemptyset( &mask ); 615 sigaddset( &mask, SIGALRM ); // block SIGALRM signals 616 sigaddset( &mask, SIGUSR1 ); // block SIGUSR1 signals 617 sigsuspend( &mask ); // block the processor to prevent further damage during abort 618 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 619 } 620 621 return TL_GET( this_thread ); 678 679 return kernelTLS.this_thread; 622 680 } 623 681 … … 625 683 thread_desc * thrd = kernel_data; 626 684 627 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing task %.256s (%p)", thrd->self_cor.name, thrd ); 628 __cfaabi_dbg_bits_write( abort_text, len ); 629 630 if ( get_coroutine(thrd) != TL_GET( this_coroutine ) ) { 631 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", TL_GET( this_coroutine )->name, TL_GET( this_coroutine ) ); 685 if(thrd) { 686 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd ); 632 687 __cfaabi_dbg_bits_write( abort_text, len ); 688 689 if ( get_coroutine(thrd) != kernelTLS.this_coroutine ) { 690 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", kernelTLS.this_coroutine->name, kernelTLS.this_coroutine ); 691 __cfaabi_dbg_bits_write( abort_text, len ); 692 } 693 else { 694 __cfaabi_dbg_bits_write( ".\n", 2 ); 695 } 633 696 } 634 697 else { 635 __cfaabi_dbg_bits_write( ".\n", 2);698 int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" ); 636 699 } 637 700 } 638 701 639 702 int kernel_abort_lastframe( void ) __attribute__ ((__nothrow__)) { 640 return get_coroutine(TL_GET( this_thread )) == get_coroutine(mainThread) ? 4 : 2; 641 } 703 return get_coroutine(kernelTLS.this_thread) == get_coroutine(mainThread) ? 
4 : 2; 704 } 705 706 static __spinlock_t kernel_debug_lock; 642 707 643 708 extern "C" { … … 668 733 if ( count < 0 ) { 669 734 // queue current task 670 append( waiting, (thread_desc *)TL_GET( this_thread ));735 append( waiting, kernelTLS.this_thread ); 671 736 672 737 // atomically release spin lock and block … … 694 759 695 760 //----------------------------------------------------------------------------- 761 // Global Queues 762 void doregister( thread_desc & thrd ) { 763 // lock ( global_thread.lock ); 764 // push_front( global_thread.list, thrd ); 765 // unlock ( global_thread.lock ); 766 } 767 768 void unregister( thread_desc & thrd ) { 769 // lock ( global_thread.lock ); 770 // remove( global_thread.list, thrd ); 771 // unlock( global_thread.lock ); 772 } 773 774 void doregister( cluster & cltr ) { 775 // lock ( global_cluster.lock ); 776 // push_front( global_cluster.list, cltr ); 777 // unlock ( global_cluster.lock ); 778 } 779 780 void unregister( cluster & cltr ) { 781 // lock ( global_cluster.lock ); 782 // remove( global_cluster.list, cltr ); 783 // unlock( global_cluster.lock ); 784 } 785 786 787 void doregister( cluster * cltr, processor * proc ) { 788 // lock (cltr->proc_list_lock __cfaabi_dbg_ctx2); 789 // push_front(cltr->procs, *proc); 790 // unlock (cltr->proc_list_lock); 791 } 792 793 void unregister( cluster * cltr, processor * proc ) { 794 // lock (cltr->proc_list_lock __cfaabi_dbg_ctx2); 795 // remove(cltr->procs, *proc ); 796 // unlock(cltr->proc_list_lock); 797 } 798 799 //----------------------------------------------------------------------------- 696 800 // Debug 697 801 __cfaabi_dbg_debug_do( 698 struct { 699 thread_desc * tail; 700 } __cfaabi_dbg_thread_list = { NULL }; 701 702 void __cfaabi_dbg_thread_register( thread_desc * thrd ) { 703 if( !__cfaabi_dbg_thread_list.tail ) { 704 __cfaabi_dbg_thread_list.tail = thrd; 705 return; 706 } 707 __cfaabi_dbg_thread_list.tail->dbg_next = thrd; 708 thrd->dbg_prev = __cfaabi_dbg_thread_list.tail; 709 __cfaabi_dbg_thread_list.tail = thrd; 710 } 711 712 void __cfaabi_dbg_thread_unregister( thread_desc * thrd ) { 713 thread_desc * prev = thrd->dbg_prev; 714 thread_desc * next = thrd->dbg_next; 715 716 if( next ) { next->dbg_prev = prev; } 717 else { 718 assert( __cfaabi_dbg_thread_list.tail == thrd ); 719 __cfaabi_dbg_thread_list.tail = prev; 720 } 721 722 if( prev ) { prev->dbg_next = next; } 723 724 thrd->dbg_prev = NULL; 725 thrd->dbg_next = NULL; 802 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) { 803 this.prev_name = prev_name; 804 this.prev_thrd = kernelTLS.this_thread; 726 805 } 727 806 ) -
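Note (illustrative): CtxInvokeProcessor now seeds the TLS preemption state with a CFA tuple assignment; written out field by field it is equivalent to:

    kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
    // is the same as:
    kernelTLS.preemption_state.enabled       = false;
    kernelTLS.preemption_state.disable_count = 1;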
src/libcfa/concurrency/kernel_private.h
rf6f0cca3 → rff29f08

 	#define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
 
+
+	void doregister( struct thread_desc & thrd );
+	void unregister( struct thread_desc & thrd );
+
+	void doregister( struct cluster & cltr );
+	void unregister( struct cluster & cltr );
+
+	void doregister( struct cluster * cltr, struct processor * proc );
+	void unregister( struct cluster * cltr, struct processor * proc );
+
 	// Local Variables: //
 	// mode: c //
src/libcfa/concurrency/monitor.c
rf6f0cca3 rff29f08 85 85 // Lock the monitor spinlock 86 86 lock( this->lock __cfaabi_dbg_ctx2 ); 87 thread_desc * thrd = TL_GET( this_thread ); 87 // Interrupts disable inside critical section 88 thread_desc * thrd = kernelTLS.this_thread; 88 89 89 90 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); … … 134 135 // Lock the monitor spinlock 135 136 lock( this->lock __cfaabi_dbg_ctx2 ); 136 thread_desc * thrd = TL_GET( this_thread ); 137 // Interrupts disable inside critical section 138 thread_desc * thrd = kernelTLS.this_thread; 137 139 138 140 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); … … 168 170 169 171 // Create the node specific to this wait operation 170 wait_ctx_primed( TL_GET( this_thread ), 0 )172 wait_ctx_primed( thrd, 0 ) 171 173 172 174 // Some one else has the monitor, wait for him to finish and then run … … 179 181 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 180 182 181 wait_ctx( TL_GET( this_thread ), 0 )183 wait_ctx( thrd, 0 ) 182 184 this->dtor_node = &waiter; 183 185 … … 199 201 lock( this->lock __cfaabi_dbg_ctx2 ); 200 202 201 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", TL_GET( this_thread ), this, this->owner);202 203 verifyf( TL_GET( this_thread ) == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", TL_GET( this_thread ), this->owner, this->recursion, this );203 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 204 205 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 204 206 205 207 // Leaving a recursion level, decrement the counter … … 289 291 // Sorts monitors before entering 290 292 void ?{}( monitor_guard_t & this, monitor_desc * m [], __lock_size_t count, fptr_t func ) { 293 thread_desc * thrd = TL_GET( this_thread ); 294 291 295 // Store current array 292 296 this.m = m; … … 297 301 298 302 // Save previous thread context 299 this.prev = TL_GET( this_thread )->monitors;303 this.prev = thrd->monitors; 300 304 301 305 // Update thread context (needed for conditions) 302 ( TL_GET( this_thread )->monitors){m, count, func};306 (thrd->monitors){m, count, func}; 303 307 304 308 // __cfaabi_dbg_print_safe( "MGUARD : enter %d\n", count); … … 328 332 // Sorts monitors before entering 329 333 void ?{}( monitor_dtor_guard_t & this, monitor_desc * m [], fptr_t func ) { 334 // optimization 335 thread_desc * thrd = TL_GET( this_thread ); 336 330 337 // Store current array 331 338 this.m = *m; 332 339 333 340 // Save previous thread context 334 this.prev = TL_GET( this_thread )->monitors;341 this.prev = thrd->monitors; 335 342 336 343 // Update thread context (needed for conditions) 337 ( TL_GET( this_thread )->monitors){m, 1, func};344 (thrd->monitors){m, 1, func}; 338 345 339 346 __enter_monitor_dtor( this.m, func ); … … 473 480 474 481 // Create the node specific to this wait operation 475 wait_ctx_primed( TL_GET( this_thread ), 0 )482 wait_ctx_primed( kernelTLS.this_thread, 0 ) 476 483 477 484 //save contexts … … 566 573 567 574 // Create the node specific to this wait operation 568 wait_ctx_primed( TL_GET( this_thread ), 0 );575 wait_ctx_primed( kernelTLS.this_thread, 0 ); 569 576 570 577 // Save monitor states … … 612 619 613 620 // Create the node specific to this wait operation 614 wait_ctx_primed( TL_GET( this_thread ), 0 );621 wait_ctx_primed( kernelTLS.this_thread, 0 
); 615 622 616 623 monitor_save; … … 618 625 619 626 for( __lock_size_t i = 0; i < count; i++) { 620 verify( monitors[i]->owner == TL_GET( this_thread ));627 verify( monitors[i]->owner == kernelTLS.this_thread ); 621 628 } 622 629 -
src/libcfa/concurrency/preemption.c
rf6f0cca3 rff29f08 149 149 // Disable interrupts by incrementing the counter 150 150 void disable_interrupts() { 151 TL_GET( preemption_state ).enabled = false; 152 __attribute__((unused)) unsigned short new_val = TL_GET( preemption_state ).disable_count + 1; 153 TL_GET( preemption_state ).disable_count = new_val; 154 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 151 with( kernelTLS.preemption_state ) { 152 enabled = false; 153 __attribute__((unused)) unsigned short new_val = disable_count + 1; 154 disable_count = new_val; 155 verify( new_val < 65_000u ); // If this triggers someone is disabling interrupts without enabling them 156 } 155 157 } 156 158 … … 158 160 // If counter reaches 0, execute any pending CtxSwitch 159 161 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 160 processor * proc = TL_GET( this_processor ); // Cache the processor now since interrupts can start happening after the atomic add 161 thread_desc * thrd = TL_GET( this_thread ); // Cache the thread now since interrupts can start happening after the atomic add 162 163 unsigned short prev = TL_GET( preemption_state ).disable_count; 164 TL_GET( preemption_state ).disable_count -= 1; 165 verify( prev != 0u ); // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 166 167 // Check if we need to prempt the thread because an interrupt was missed 168 if( prev == 1 ) { 169 TL_GET( preemption_state ).enabled = true; 170 if( proc->pending_preemption ) { 171 proc->pending_preemption = false; 172 BlockInternal( thrd ); 162 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add 163 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic add 164 165 with( kernelTLS.preemption_state ){ 166 unsigned short prev = disable_count; 167 disable_count -= 1; 168 verify( prev != 0u ); // If this triggers someone is enabled already enabled interruptsverify( prev != 0u ); 169 170 // Check if we need to prempt the thread because an interrupt was missed 171 if( prev == 1 ) { 172 enabled = true; 173 if( proc->pending_preemption ) { 174 proc->pending_preemption = false; 175 BlockInternal( thrd ); 176 } 173 177 } 174 178 } … … 181 185 // Don't execute any pending CtxSwitch even if counter reaches 0 182 186 void enable_interrupts_noPoll() { 183 unsigned short prev = TL_GET( preemption_state ).disable_count;184 TL_GET( preemption_state ).disable_count -= 1;187 unsigned short prev = kernelTLS.preemption_state.disable_count; 188 kernelTLS.preemption_state.disable_count -= 1; 185 189 verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts 186 190 if( prev == 1 ) { 187 TL_GET( preemption_state ).enabled = true;191 kernelTLS.preemption_state.enabled = true; 188 192 } 189 193 } … … 230 234 } 231 235 232 236 // KERNEL ONLY 233 237 // Check if a CtxSwitch signal handler shoud defer 234 238 // If true : preemption is safe 235 239 // If false : preemption is unsafe and marked as pending 236 240 static inline bool preemption_ready() { 237 bool ready = TL_GET( preemption_state ).enabled && !TL_GET( preemption_state ).in_progress; // Check if preemption is safe 238 TL_GET( this_processor )->pending_preemption = !ready; // Adjust the pending flag accordingly 241 // Check if preemption is safe 242 bool ready = kernelTLS.preemption_state.enabled && ! 
kernelTLS.preemption_state.in_progress; 243 244 // Adjust the pending flag accordingly 245 kernelTLS.this_processor->pending_preemption = !ready; 239 246 return ready; 240 247 } … … 250 257 251 258 // Start with preemption disabled until ready 252 TL_GET( preemption_state ).enabled = false;253 TL_GET( preemption_state ).disable_count = 1;259 kernelTLS.preemption_state.enabled = false; 260 kernelTLS.preemption_state.disable_count = 1; 254 261 255 262 // Initialize the event kernel … … 316 323 // before the kernel thread has even started running. When that happens an iterrupt 317 324 // we a null 'this_processor' will be caught, just ignore it. 318 if(! TL_GET( this_processor )) return;325 if(! kernelTLS.this_processor ) return; 319 326 320 327 choose(sfp->si_value.sival_int) { 321 328 case PREEMPT_NORMAL : ;// Normal case, nothing to do here 322 case PREEMPT_TERMINATE: verify( TL_GET( this_processor )->do_terminate);329 case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate); 323 330 default: 324 331 abort( "internal error, signal value is %d", sfp->si_value.sival_int ); … … 328 335 if( !preemption_ready() ) { return; } 329 336 330 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", this_processor, this_thread); 331 332 TL_GET( preemption_state ).in_progress = true; // Sync flag : prevent recursive calls to the signal handler 333 signal_unblock( SIGUSR1 ); // We are about to CtxSwitch out of the signal handler, let other handlers in 334 TL_GET( preemption_state ).in_progress = false; // Clear the in progress flag 337 __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread ); 338 339 // Sync flag : prevent recursive calls to the signal handler 340 kernelTLS.preemption_state.in_progress = true; 341 342 // We are about to CtxSwitch out of the signal handler, let other handlers in 343 signal_unblock( SIGUSR1 ); 344 345 // TODO: this should go in finish action 346 // Clear the in progress flag 347 kernelTLS.preemption_state.in_progress = false; 335 348 336 349 // Preemption can occur here 337 350 338 BlockInternal( (thread_desc*)TL_GET( this_thread )); // Do the actual CtxSwitch351 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch 339 352 } 340 353 … … 344 357 // Block sigalrms to control when they arrive 345 358 sigset_t mask; 359 sigfillset(&mask); 360 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) { 361 abort( "internal error, pthread_sigmask" ); 362 } 363 346 364 sigemptyset( &mask ); 347 365 sigaddset( &mask, SIGALRM ); 348 349 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {350 abort( "internal error, pthread_sigmask" );351 }352 366 353 367 // Main loop … … 400 414 } 401 415 416 //============================================================================================= 417 // Kernel Signal Debug 418 //============================================================================================= 419 420 void __cfaabi_check_preemption() { 421 bool ready = kernelTLS.preemption_state.enabled; 422 if(!ready) { abort("Preemption should be ready"); } 423 424 sigset_t oldset; 425 int ret; 426 ret = sigprocmask(0, NULL, &oldset); 427 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 428 429 ret = sigismember(&oldset, SIGUSR1); 430 if(ret < 0) { abort("ERROR sigismember returned %d", ret); } 431 432 if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); } 433 } 434 402 435 // Local Variables: // 403 436 // mode: c // -
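Note (illustrative): the reworked preemption routines keep a per-kernel-thread counter in kernelTLS.preemption_state, so calls must stay balanced. The intended pairing, as used throughout kernel.c:

    disable_interrupts();                    // disable_count 0 -> 1, enabled = false
    // ... preemption-free critical section ...
    enable_interrupts( __cfaabi_dbg_ctx );   // disable_count 1 -> 0, enabled = true;
                                             // a preemption missed in between is taken here via BlockInternal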
src/libcfa/concurrency/thread
rf6f0cca3 rff29f08 20 20 21 21 #include "coroutine" 22 #include "kernel" 22 23 #include "monitor" 23 24 24 25 //----------------------------------------------------------------------------- 25 // Coroutine trait 26 // Anything that implements this trait can be resumed. 27 // Anything that is resumed is a coroutine. 26 // thread trait 28 27 trait is_thread(dtype T) { 29 28 void ^?{}(T& mutex this); … … 52 51 } 53 52 54 //extern thread_local thread_desc * volatile this_thread;53 extern struct cluster * mainCluster; 55 54 56 55 forall( dtype T | is_thread(T) ) … … 59 58 //----------------------------------------------------------------------------- 60 59 // Ctors and dtors 61 void ?{}(thread_desc& this); 62 void ^?{}(thread_desc& this); 60 void ?{}(thread_desc & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize ); 61 void ^?{}(thread_desc & this); 62 63 static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 0 }; } 64 static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; } 65 static inline void ?{}(thread_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; } 66 static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 0 }; } 67 static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0, stackSize }; } 68 static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; } 69 static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 0 }; } 70 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 0 }; } 71 static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; } 63 72 64 73 //----------------------------------------------------------------------------- -
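Note (illustrative): thread_desc gains the same name/cluster/storage overload family as coroutine_desc and processor. In user code these descriptors usually sit inside a thread type, but calling the overloads directly shows the defaulting (names invented for the example):

    cluster     workers = { "Workers" };                   // hypothetical cluster
    thread_desc t0;                                        // "Anonymous Thread" on *mainCluster, default stack
    thread_desc t1 = { "Logger", workers };                // named thread on a specific cluster
    thread_desc t2 = { "Logger", workers, 256 * 1024 };    // same, with an explicit stack-size request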
src/libcfa/concurrency/thread.c
rf6f0cca3 rff29f08 30 30 //----------------------------------------------------------------------------- 31 31 // Thread ctors and dtors 32 33 void ?{}(thread_desc& this) with( this ) { 34 self_cor{}; 35 self_cor.name = "Anonymous Coroutine"; 32 void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) { 33 self_cor{ name, storage, storageSize }; 34 verify(&self_cor); 36 35 curr_cor = &self_cor; 37 36 self_mon.owner = &this; 38 37 self_mon.recursion = 1; 39 38 self_mon_p = &self_mon; 39 curr_cluster = &cl; 40 40 next = NULL; 41 __cfaabi_dbg_debug_do( 42 dbg_next = NULL; 43 dbg_prev = NULL; 44 __cfaabi_dbg_thread_register(&this); 45 ) 41 42 node.next = NULL; 43 node.prev = NULL; 44 doregister(this); 46 45 47 46 monitors{ &self_mon_p, 1, (fptr_t)0 }; … … 49 48 50 49 void ^?{}(thread_desc& this) with( this ) { 50 unregister(this); 51 51 ^self_cor{}; 52 52 } … … 81 81 disable_interrupts(); 82 82 create_stack(&thrd_c->stack, thrd_c->stack.size); 83 TL_SET( this_coroutine, thrd_c );83 kernelTLS.this_coroutine = thrd_c; 84 84 CtxStart(&this, CtxInvokeThread); 85 85 assert( thrd_c->last->stack.context ); … … 91 91 92 92 extern "C" { 93 // KERNEL ONLY 93 94 void __finish_creation(void) { 94 coroutine_desc* thrd_c = TL_GET( this_coroutine );95 coroutine_desc* thrd_c = kernelTLS.this_coroutine; 95 96 ThreadCtxSwitch( thrd_c, thrd_c->last ); 96 97 } … … 98 99 99 100 void yield( void ) { 100 verify( TL_GET( preemption_state ).enabled ); 101 // Safety note : This could cause some false positives due to preemption 102 verify( TL_GET( preemption_state.enabled ) ); 101 103 BlockInternal( TL_GET( this_thread ) ); 102 verify( TL_GET( preemption_state ).enabled ); 104 // Safety note : This could cause some false positives due to preemption 105 verify( TL_GET( preemption_state.enabled ) ); 103 106 } 104 107 … … 109 112 } 110 113 114 // KERNEL ONLY 111 115 void ThreadCtxSwitch(coroutine_desc* src, coroutine_desc* dst) { 112 116 // set state of current coroutine to inactive … … 116 120 // set new coroutine that the processor is executing 117 121 // and context switch to it 118 TL_SET( this_coroutine, dst );122 kernelTLS.this_coroutine = dst; 119 123 assert( src->stack.context ); 120 124 CtxSwitch( src->stack.context, dst->stack.context ); 121 TL_SET( this_coroutine, src );125 kernelTLS.this_coroutine = src; 122 126 123 127 // set state of new coroutine to active