Changeset 3d5701e for libcfa/src/concurrency
- Timestamp: Feb 25, 2020, 1:17:33 PM (6 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum, stuck-waitfor-destruct
- Children: 7dc2e015
- Parents: 9fb8f01 (diff), dd9e1ca (diff)

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src/concurrency
- Files: 19 edited
  - CtxSwitch-arm.S (modified) (2 diffs)
  - CtxSwitch-i386.S (modified) (2 diffs)
  - CtxSwitch-x86_64.S (modified) (3 diffs)
  - alarm.cfa (modified) (5 diffs)
  - alarm.hfa (modified) (3 diffs)
  - coroutine.cfa (modified) (9 diffs)
  - coroutine.hfa (modified) (12 diffs)
  - invoke.c (modified) (7 diffs)
  - invoke.h (modified) (12 diffs)
  - kernel.cfa (modified) (40 diffs)
  - kernel.hfa (modified) (10 diffs)
  - kernel_private.hfa (modified) (4 diffs)
  - monitor.cfa (modified) (42 diffs)
  - monitor.hfa (modified) (8 diffs)
  - mutex.cfa (modified) (12 diffs)
  - mutex.hfa (modified) (5 diffs)
  - preemption.cfa (modified) (20 diffs)
  - thread.cfa (modified) (6 diffs)
  - thread.hfa (modified) (3 diffs)
libcfa/src/concurrency/CtxSwitch-arm.S
r9fb8f01 r3d5701e
@@ -13,8 +13,8 @@
 	.text
 	.align 2
-	.global CtxSwitch
-	.type CtxSwitch, %function
+	.global __cfactx_switch
+	.type __cfactx_switch, %function
 
-CtxSwitch:
+__cfactx_switch:
 	@ save callee-saved registers: r4-r8, r10, r11, r13(sp) (plus r9 depending on platform specification)
 	@ I've seen reference to 31 registers on 64-bit, if this is the case, more need to be saved
@@ -52,11 +52,11 @@
 	mov r15, r14
 #endif // R9_SPECIAL
 
 	.text
 	.align 2
-	.global CtxInvokeStub
-	.type CtxInvokeStub, %function
+	.global __cfactx_invoke_stub
+	.type __cfactx_invoke_stub, %function
 
-CtxInvokeStub:
+__cfactx_invoke_stub:
 	ldmfd r13!, {r0-r1}
 	mov r15, r1
libcfa/src/concurrency/CtxSwitch-i386.S
r9fb8f01 r3d5701e
@@ -43,7 +43,7 @@
 	.text
 	.align 2
-	.globl CtxSwitch
-	.type CtxSwitch, @function
-CtxSwitch:
+	.globl __cfactx_switch
+	.type __cfactx_switch, @function
+__cfactx_switch:
 
 	// Copy the "from" context argument from the stack to register eax
@@ -83,5 +83,5 @@
 
 	ret
-	.size CtxSwitch, .-CtxSwitch
+	.size __cfactx_switch, .-__cfactx_switch
 
 // Local Variables: //
libcfa/src/concurrency/CtxSwitch-x86_64.S
r9fb8f01 r3d5701e
@@ -44,7 +44,7 @@
 	.text
 	.align 2
-	.globl CtxSwitch
-	.type CtxSwitch, @function
-CtxSwitch:
+	.globl __cfactx_switch
+	.type __cfactx_switch, @function
+__cfactx_switch:
 
 	// Save volatile registers on the stack.
@@ -77,5 +77,5 @@
 
 	ret
-	.size CtxSwitch, .-CtxSwitch
+	.size __cfactx_switch, .-__cfactx_switch
 
 //-----------------------------------------------------------------------------
@@ -83,10 +83,11 @@
 	.text
 	.align 2
-	.globl CtxInvokeStub
-	.type CtxInvokeStub, @function
-CtxInvokeStub:
+	.globl __cfactx_invoke_stub
+	.type __cfactx_invoke_stub, @function
+__cfactx_invoke_stub:
 	movq %rbx, %rdi
-	jmp *%r12
-	.size CtxInvokeStub, .-CtxInvokeStub
+	movq %r12, %rsi
+	jmp *%r13
+	.size __cfactx_invoke_stub, .-__cfactx_invoke_stub
 
 // Local Variables: //
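Beyond the renames, the functional change in this file is that the invoke stub now forwards two values instead of one: the fake stack seeds the routine and its argument in callee-saved registers (%rbx, %r12), and __cfactx_invoke_stub moves them into the first two argument registers (%rdi, %rsi) before jumping through %r13 to the invoke routine. Below is a minimal plain-C sketch of that handoff; the struct and function names are hypothetical stand-ins, not the CFA runtime.

#include <stdio.h>

/* stands in for the three callee-saved registers the fake stack seeds */
struct fake_registers {
	void (*main_fn)(void *);                    /* %rbx -> becomes %rdi (1st argument) */
	void *this_arg;                             /* %r12 -> becomes %rsi (2nd argument) */
	void (*invoke)(void (*)(void *), void *);   /* %r13 -> indirect jump target        */
};

static void coroutine_main(void *arg) {         /* the user-level main */
	printf("coroutine main ran with \"%s\"\n", (const char *)arg);
}

static void invoke_coroutine(void (*main_fn)(void *), void *this_arg) {
	main_fn(this_arg);                          /* role of __cfactx_invoke_coroutine */
}

static void stub(struct fake_registers *r) {    /* role of __cfactx_invoke_stub:    */
	r->invoke(r->main_fn, r->this_arg);         /* two moves and an indirect jump   */
}

int main(void) {
	struct fake_registers r = { coroutine_main, (void *)"demo", invoke_coroutine };
	stub(&r);
	return 0;
}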
libcfa/src/concurrency/alarm.cfa
r9fb8f01 r3d5701e
@@ -10,6 +10,6 @@
 // Created On : Fri Jun 2 11:31:25 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Fri May 25 06:25:47 2018
-// Update Count : 67
+// Last Modified On : Sun Jan 5 08:41:36 2020
+// Update Count : 69
 //
 
@@ -39,6 +39,6 @@
 
 void __kernel_set_timer( Duration alarm ) {
-	verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
-	setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
+	verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm`ns);
+	setitimer( ITIMER_REAL, &(itimerval){ alarm }, 0p );
 }
 
@@ -47,5 +47,5 @@
 //=============================================================================================
 
-void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period ) with( this ) {
+void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period ) with( this ) {
 	this.thrd = thrd;
 	this.alarm = alarm;
@@ -113,5 +113,5 @@
 		this->tail = &this->head;
 	}
-	head->next = NULL;
+	head->next = 0p;
 }
 verify( validate( this ) );
@@ -127,5 +127,5 @@
 		this->tail = it;
 	}
-	n->next = NULL;
+	n->next = 0p;
 
 	verify( validate( this ) );
libcfa/src/concurrency/alarm.hfa
r9fb8f01 r3d5701e
@@ -23,5 +23,5 @@
 #include "time.hfa"
 
-struct thread_desc;
+struct $thread;
 struct processor;
 
@@ -43,5 +43,5 @@
 
 	union {
-		thread_desc * thrd;	// thrd who created event
+		$thread * thrd;	// thrd who created event
 		processor * proc;	// proc who created event
 
@@ -53,5 +53,5 @@
 typedef alarm_node_t ** __alarm_it_t;
 
-void ?{}( alarm_node_t & this, thread_desc * thrd, Time alarm, Duration period );
+void ?{}( alarm_node_t & this, $thread * thrd, Time alarm, Duration period );
 void ?{}( alarm_node_t & this, processor * proc, Time alarm, Duration period );
 void ^?{}( alarm_node_t & this );
libcfa/src/concurrency/coroutine.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 17:20:57 201813 // Update Count : 912 // Last Modified On : Tue Feb 4 12:29:25 2020 13 // Update Count : 16 14 14 // 15 15 … … 37 37 38 38 extern "C" { 39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc*) __attribute__ ((__noreturn__));39 void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__)); 40 40 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__)); 41 41 static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) { … … 89 89 } 90 90 91 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {92 (this.context){ NULL, NULL};91 void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ) with( this ) { 92 (this.context){0p, 0p}; 93 93 (this.stack){storage, storageSize}; 94 94 this.name = name; 95 95 state = Start; 96 starter = NULL;97 last = NULL;98 cancellation = NULL;99 } 100 101 void ^?{}( coroutine_desc& this) {96 starter = 0p; 97 last = 0p; 98 cancellation = 0p; 99 } 100 101 void ^?{}($coroutine& this) { 102 102 if(this.state != Halted && this.state != Start && this.state != Primed) { 103 coroutine_desc* src = TL_GET( this_thread )->curr_cor;104 coroutine_desc* dst = &this;103 $coroutine * src = TL_GET( this_thread )->curr_cor; 104 $coroutine * dst = &this; 105 105 106 106 struct _Unwind_Exception storage; … … 115 115 } 116 116 117 CoroutineCtxSwitch( src, dst );117 $ctx_switch( src, dst ); 118 118 } 119 119 } … … 123 123 forall(dtype T | is_coroutine(T)) 124 124 void prime(T& cor) { 125 coroutine_desc* this = get_coroutine(cor);125 $coroutine* this = get_coroutine(cor); 126 126 assert(this->state == Start); 127 127 … … 131 131 132 132 [void *, size_t] __stack_alloc( size_t storageSize ) { 133 staticconst size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment133 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 134 134 assert(__page_size != 0l); 135 135 size_t size = libCeiling( storageSize, 16 ) + stack_data_size; … … 157 157 158 158 void __stack_prepare( __stack_info_t * this, size_t create_size ) { 159 staticconst size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment159 const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment 160 160 bool userStack; 161 161 void * storage; … … 187 187 // is not inline (We can't inline Cforall in C) 188 188 extern "C" { 189 void __suspend_internal(void) { 190 suspend(); 191 } 192 193 void __leave_coroutine( coroutine_desc * src ) { 194 coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter; 189 void __cfactx_cor_leave( struct $coroutine * src ) { 190 $coroutine * starter = src->cancellation != 0 ? src->last : src->starter; 195 191 196 192 src->state = Halted; … … 205 201 src->name, src, starter->name, starter ); 206 202 207 CoroutineCtxSwitch( src, starter ); 203 $ctx_switch( src, starter ); 204 } 205 206 struct $coroutine * __cfactx_cor_finish(void) { 207 struct $coroutine * cor = kernelTLS.this_thread->curr_cor; 208 209 if(cor->state == Primed) { 210 suspend(); 211 } 212 213 cor->state = Active; 214 215 return cor; 208 216 } 209 217 } -
libcfa/src/concurrency/coroutine.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Nov 28 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Jun 21 17:49:39 201913 // Update Count : 912 // Last Modified On : Tue Feb 4 12:29:26 2020 13 // Update Count : 11 14 14 // 15 15 … … 25 25 trait is_coroutine(dtype T) { 26 26 void main(T & this); 27 coroutine_desc* get_coroutine(T & this);27 $coroutine * get_coroutine(T & this); 28 28 }; 29 29 30 #define DECL_COROUTINE(X) static inline coroutine_desc* get_coroutine(X& this) { return &this.__cor; } void main(X& this)30 #define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this) 31 31 32 32 //----------------------------------------------------------------------------- … … 35 35 // void ^?{}( coStack_t & this ); 36 36 37 void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize );38 void ^?{}( coroutine_desc& this );37 void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize ); 38 void ^?{}( $coroutine & this ); 39 39 40 static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", NULL, 0 }; }41 static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", NULL, stackSize }; }42 static inline void ?{}( coroutine_desc& this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }43 static inline void ?{}( coroutine_desc & this, const char * name) { this{ name, NULL, 0 }; }44 static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; }40 static inline void ?{}( $coroutine & this) { this{ "Anonymous Coroutine", 0p, 0 }; } 41 static inline void ?{}( $coroutine & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; } 42 static inline void ?{}( $coroutine & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; } 43 static inline void ?{}( $coroutine & this, const char name[]) { this{ name, 0p, 0 }; } 44 static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; } 45 45 46 46 //----------------------------------------------------------------------------- … … 54 54 void prime(T & cor); 55 55 56 static inline struct coroutine_desc* active_coroutine() { return TL_GET( this_thread )->curr_cor; }56 static inline struct $coroutine * active_coroutine() { return TL_GET( this_thread )->curr_cor; } 57 57 58 58 //----------------------------------------------------------------------------- … … 61 61 // Start coroutine routines 62 62 extern "C" { 63 forall(dtype T | is_coroutine(T)) 64 void CtxInvokeCoroutine(T * this); 63 void __cfactx_invoke_coroutine(void (*main)(void *), void * this); 65 64 66 forall(dtype T | is_coroutine(T))67 void CtxStart(T * this, void ( *invoke)(T*));65 forall(dtype T) 66 void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *)); 68 67 69 extern void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc*) __attribute__ ((__noreturn__));68 extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__)); 70 69 71 extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");70 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm 
("__cfactx_switch"); 72 71 } 73 72 74 73 // Private wrappers for context switch and stack creation 75 74 // Wrapper for co 76 static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {75 static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) { 77 76 // set state of current coroutine to inactive 78 77 src->state = src->state == Halted ? Halted : Inactive; … … 83 82 // context switch to specified coroutine 84 83 verify( dst->context.SP ); 85 CtxSwitch( &src->context, &dst->context );86 // when CtxSwitch returns we are back in the src coroutine84 __cfactx_switch( &src->context, &dst->context ); 85 // when __cfactx_switch returns we are back in the src coroutine 87 86 88 87 // set state of new coroutine to active 89 88 src->state = Active; 90 89 91 if( unlikely(src->cancellation != NULL) ) {92 _ CtxCoroutine_Unwind(src->cancellation, src);90 if( unlikely(src->cancellation != 0p) ) { 91 __cfactx_coroutine_unwind(src->cancellation, src); 93 92 } 94 93 } … … 103 102 // will also migrate which means this value will 104 103 // stay in syn with the TLS 105 coroutine_desc* src = TL_GET( this_thread )->curr_cor;104 $coroutine * src = TL_GET( this_thread )->curr_cor; 106 105 107 106 assertf( src->last != 0, … … 114 113 src->name, src, src->last->name, src->last ); 115 114 116 CoroutineCtxSwitch( src, src->last );115 $ctx_switch( src, src->last ); 117 116 } 118 117 … … 125 124 // will also migrate which means this value will 126 125 // stay in syn with the TLS 127 coroutine_desc* src = TL_GET( this_thread )->curr_cor;128 coroutine_desc* dst = get_coroutine(cor);126 $coroutine * src = TL_GET( this_thread )->curr_cor; 127 $coroutine * dst = get_coroutine(cor); 129 128 130 if( unlikely(dst->context.SP == NULL) ) { 129 if( unlikely(dst->context.SP == 0p) ) { 130 TL_GET( this_thread )->curr_cor = dst; 131 131 __stack_prepare(&dst->stack, 65000); 132 CtxStart(&cor, CtxInvokeCoroutine); 132 __cfactx_start(main, dst, cor, __cfactx_invoke_coroutine); 133 TL_GET( this_thread )->curr_cor = src; 133 134 } 134 135 … … 146 147 147 148 // always done for performance testing 148 CoroutineCtxSwitch( src, dst );149 $ctx_switch( src, dst ); 149 150 150 151 return cor; 151 152 } 152 153 153 static inline void resume( coroutine_desc * dst) {154 static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) { 154 155 // optimization : read TLS once and reuse it 155 156 // Safety note: this is preemption safe since if … … 157 158 // will also migrate which means this value will 158 159 // stay in syn with the TLS 159 coroutine_desc* src = TL_GET( this_thread )->curr_cor;160 $coroutine * src = TL_GET( this_thread )->curr_cor; 160 161 161 162 // not resuming self ? … … 171 172 172 173 // always done for performance testing 173 CoroutineCtxSwitch( src, dst );174 $ctx_switch( src, dst ); 174 175 } 175 176 -
libcfa/src/concurrency/invoke.c
r9fb8f01 r3d5701e 29 29 // Called from the kernel when starting a coroutine or task so must switch back to user mode. 30 30 31 extern void __suspend_internal(void);32 extern void __ leave_coroutine( struct coroutine_desc* );33 extern void __ finish_creation( struct thread_desc *);34 extern void __leave_thread_monitor( struct thread_desc * this ); 31 extern struct $coroutine * __cfactx_cor_finish(void); 32 extern void __cfactx_cor_leave ( struct $coroutine * ); 33 extern void __cfactx_thrd_leave(); 34 35 35 extern void disable_interrupts() OPTIONAL_THREAD; 36 36 extern void enable_interrupts( __cfaabi_dbg_ctx_param ); 37 37 38 void CtxInvokeCoroutine(38 void __cfactx_invoke_coroutine( 39 39 void (*main)(void *), 40 struct coroutine_desc *(*get_coroutine)(void *),41 40 void *this 42 41 ) { 43 struct coroutine_desc* cor = get_coroutine( this ); 42 // Finish setting up the coroutine by setting its state 43 struct $coroutine * cor = __cfactx_cor_finish(); 44 44 45 if(cor->state == Primed) { 46 __suspend_internal(); 47 } 48 49 cor->state = Active; 50 45 // Call the main of the coroutine 51 46 main( this ); 52 47 53 48 //Final suspend, should never return 54 __ leave_coroutine( cor );49 __cfactx_cor_leave( cor ); 55 50 __cabi_abort( "Resumed dead coroutine" ); 56 51 } 57 52 58 static _Unwind_Reason_Code _ CtxCoroutine_UnwindStop(53 static _Unwind_Reason_Code __cfactx_coroutine_unwindstop( 59 54 __attribute((__unused__)) int version, 60 55 _Unwind_Action actions, … … 67 62 // We finished unwinding the coroutine, 68 63 // leave it 69 __ leave_coroutine( param );64 __cfactx_cor_leave( param ); 70 65 __cabi_abort( "Resumed dead coroutine" ); 71 66 } … … 75 70 } 76 71 77 void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc* cor) __attribute__ ((__noreturn__));78 void _ CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc* cor) {79 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, _ CtxCoroutine_UnwindStop, cor );72 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__)); 73 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) { 74 _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor ); 80 75 printf("UNWIND ERROR %d after force unwind\n", ret); 81 76 abort(); 82 77 } 83 78 84 void CtxInvokeThread( 85 void (*dtor)(void *), 79 void __cfactx_invoke_thread( 86 80 void (*main)(void *), 87 struct thread_desc *(*get_thread)(void *),88 81 void *this 89 82 ) { 90 // Fetch the thread handle from the user defined thread structure91 struct thread_desc* thrd = get_thread( this );92 93 // First suspend, once the thread arrives here,94 // the function pointer to main can be invalidated without risk95 __finish_creation( thrd );96 97 83 // Officially start the thread by enabling preemption 98 84 enable_interrupts( __cfaabi_dbg_ctx ); … … 108 94 // The order of these 4 operations is very important 109 95 //Final suspend, should never return 110 __ leave_thread_monitor( thrd);96 __cfactx_thrd_leave(); 111 97 __cabi_abort( "Resumed dead thread" ); 112 98 } 113 99 114 115 void CtxStart( 100 void __cfactx_start( 116 101 void (*main)(void *), 117 struct coroutine_desc *(*get_coroutine)(void *),102 struct $coroutine * cor, 118 103 void *this, 119 104 void (*invoke)(void *) 120 105 ) { 121 struct coroutine_desc * cor = get_coroutine( this );122 106 struct __stack_t * stack = cor->stack.storage; 123 107 … … 138 122 139 123 
fs->dummyReturn = NULL; 140 fs->argument[0] = this; // argument to invoke 124 fs->argument[0] = main; // argument to invoke 125 fs->argument[1] = this; // argument to invoke 141 126 fs->rturn = invoke; 142 127 … … 155 140 156 141 fs->dummyReturn = NULL; 157 fs->rturn = CtxInvokeStub; 158 fs->fixedRegisters[0] = this; 159 fs->fixedRegisters[1] = invoke; 142 fs->rturn = __cfactx_invoke_stub; 143 fs->fixedRegisters[0] = main; 144 fs->fixedRegisters[1] = this; 145 fs->fixedRegisters[2] = invoke; 160 146 161 147 #elif defined( __ARM_ARCH ) 162 148 #error ARM needs to be upgrade to use to parameters like X86/X64 (A.K.A. : I broke this and do not know how to fix it) 163 149 struct FakeStack { 164 150 float fpRegs[16]; // floating point registers … … 172 158 struct FakeStack *fs = (struct FakeStack *)cor->context.SP; 173 159 174 fs->intRegs[8] = CtxInvokeStub;160 fs->intRegs[8] = __cfactx_invoke_stub; 175 161 fs->arg[0] = this; 176 162 fs->arg[1] = invoke; -
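With this change the generic invoke path no longer fetches the coroutine descriptor through a get_coroutine callback or checks the Primed state itself: __cfactx_invoke_coroutine receives main and the object directly, asks __cfactx_cor_finish to complete setup on the coroutine's own stack, runs main, and falls through to __cfactx_cor_leave. Below is a minimal C sketch of that call order, with placeholder helpers rather than the real CFA runtime.

#include <stdio.h>
#include <stdlib.h>

struct coroutine { int state; };                 /* stands in for $coroutine       */
static struct coroutine the_cor;                 /* placeholder for the TLS lookup */

static struct coroutine *cor_finish(void) {      /* role of __cfactx_cor_finish    */
	the_cor.state = 1;                           /* mark the coroutine Active      */
	return &the_cor;
}

static void cor_leave(struct coroutine *cor) {   /* role of __cfactx_cor_leave     */
	cor->state = 0;                              /* mark Halted                    */
	puts("final leave, never returns");
	exit(0);
}

static void invoke_coroutine(void (*main_fn)(void *), void *this_arg) {
	struct coroutine *cor = cor_finish();        /* finish setup inside the coroutine */
	main_fn(this_arg);                           /* run the user main                 */
	cor_leave(cor);                              /* final leave, should never return  */
}

static void user_main(void *arg) { printf("running %s\n", (const char *)arg); }

int main(void) {
	invoke_coroutine(user_main, (void *)"demo");
	return 0;
}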
libcfa/src/concurrency/invoke.h
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jan 17 12:27:26 2016 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 22 18:19:13 201913 // Update Count : 4 012 // Last Modified On : Thu Dec 5 16:26:03 2019 13 // Update Count : 44 14 14 // 15 15 … … 46 46 #ifdef __cforall 47 47 extern "Cforall" { 48 extern thread_local struct KernelThreadData {49 struct thread_desc* volatile this_thread;48 extern __attribute__((aligned(128))) thread_local struct KernelThreadData { 49 struct $thread * volatile this_thread; 50 50 struct processor * volatile this_processor; 51 51 … … 55 55 volatile bool in_progress; 56 56 } preemption_state; 57 58 uint32_t rand_seed; 57 59 } kernelTLS __attribute__ ((tls_model ( "initial-exec" ))); 58 60 } … … 90 92 }; 91 93 92 enum coroutine_state { Halted, Start, Inactive, Active, Primed }; 93 94 struct coroutine_desc { 95 // context that is switch during a CtxSwitch 94 enum coroutine_state { Halted, Start, Primed, Inactive, Active, Rerun }; 95 enum __Preemption_Reason { __NO_PREEMPTION, __ALARM_PREEMPTION, __POLL_PREEMPTION, __MANUAL_PREEMPTION }; 96 97 struct $coroutine { 98 // context that is switch during a __cfactx_switch 96 99 struct __stack_context_t context; 97 100 … … 106 109 107 110 // first coroutine to resume this one 108 struct coroutine_desc* starter;111 struct $coroutine * starter; 109 112 110 113 // last coroutine to resume this one 111 struct coroutine_desc* last;114 struct $coroutine * last; 112 115 113 116 // If non-null stack must be unwound with this exception … … 125 128 }; 126 129 127 struct monitor_desc{130 struct $monitor { 128 131 // spinlock to protect internal data 129 132 struct __spinlock_t lock; 130 133 131 134 // current owner of the monitor 132 struct thread_desc* owner;135 struct $thread * owner; 133 136 134 137 // queue of threads that are blocked waiting for the monitor 135 __queue_t(struct thread_desc) entry_queue;138 __queue_t(struct $thread) entry_queue; 136 139 137 140 // stack of conditions to run next once we exit the monitor … … 150 153 struct __monitor_group_t { 151 154 // currently held monitors 152 __cfa_anonymous_object( __small_array_t( monitor_desc*) );155 __cfa_anonymous_object( __small_array_t($monitor*) ); 153 156 154 157 // last function that acquired monitors … … 156 159 }; 157 160 158 struct thread_desc{161 struct $thread { 159 162 // Core threading fields 160 // context that is switch during a CtxSwitch163 // context that is switch during a __cfactx_switch 161 164 struct __stack_context_t context; 162 165 163 166 // current execution status for coroutine 164 enum coroutine_state state; 167 volatile int state; 168 enum __Preemption_Reason preempted; 165 169 166 170 //SKULLDUGGERY errno is not save in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it 167 171 168 172 // coroutine body used to store context 169 struct coroutine_descself_cor;173 struct $coroutine self_cor; 170 174 171 175 // current active context 172 struct coroutine_desc* curr_cor;176 struct $coroutine * curr_cor; 173 177 174 178 // monitor body used for mutual exclusion 175 struct monitor_descself_mon;179 struct $monitor self_mon; 176 180 177 181 // pointer to monitor with sufficient lifetime for current monitors 178 struct monitor_desc* self_mon_p;182 struct $monitor * self_mon_p; 179 183 180 184 // pointer to the cluster on which the thread is running … … 186 190 // Link lists fields 187 191 // instrusive link field for threads 188 struct thread_desc* next;192 
struct $thread * next; 189 193 190 194 struct { 191 struct thread_desc* next;192 struct thread_desc* prev;195 struct $thread * next; 196 struct $thread * prev; 193 197 } node; 194 198 }; … … 196 200 #ifdef __cforall 197 201 extern "Cforall" { 198 static inline thread_desc *& get_next( thread_desc & this) {202 static inline $thread *& get_next( $thread & this ) __attribute__((const)) { 199 203 return this.next; 200 204 } 201 205 202 static inline [ thread_desc *&, thread_desc *& ] __get( thread_desc & this) {206 static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) { 203 207 return this.node.[next, prev]; 204 208 } 205 209 206 210 static inline void ?{}(__monitor_group_t & this) { 207 (this.data){ NULL};211 (this.data){0p}; 208 212 (this.size){0}; 209 213 (this.func){NULL}; 210 214 } 211 215 212 static inline void ?{}(__monitor_group_t & this, struct monitor_desc** data, __lock_size_t size, fptr_t func) {216 static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) { 213 217 (this.data){data}; 214 218 (this.size){size}; … … 216 220 } 217 221 218 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {222 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) { 219 223 if( (lhs.data != 0) != (rhs.data != 0) ) return false; 220 224 if( lhs.size != rhs.size ) return false; … … 250 254 251 255 // assembler routines that performs the context switch 252 extern void CtxInvokeStub( void );253 extern void CtxSwitch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("CtxSwitch");256 extern void __cfactx_invoke_stub( void ); 257 extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch"); 254 258 // void CtxStore ( void * this ) asm ("CtxStore"); 255 259 // void CtxRet ( void * dst ) asm ("CtxRet"); -
libcfa/src/concurrency/kernel.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T hu Jun 20 17:21:23 201913 // Update Count : 2512 // Last Modified On : Tue Feb 4 13:03:15 2020 13 // Update Count : 58 14 14 // 15 15 … … 26 26 #include <signal.h> 27 27 #include <unistd.h> 28 #include <limits.h> // PTHREAD_STACK_MIN 29 #include <sys/mman.h> // mprotect 28 30 } 29 31 … … 40 42 //----------------------------------------------------------------------------- 41 43 // Some assembly required 42 #if defined( __i386 )44 #if defined( __i386 ) 43 45 #define CtxGet( ctx ) \ 44 46 __asm__ volatile ( \ … … 108 110 //----------------------------------------------------------------------------- 109 111 //Start and stop routine for the kernel, declared first to make sure they run first 110 static void kernel_startup(void)__attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));111 static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));112 static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 113 static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) )); 112 114 113 115 //----------------------------------------------------------------------------- … … 115 117 KERNEL_STORAGE(cluster, mainCluster); 116 118 KERNEL_STORAGE(processor, mainProcessor); 117 KERNEL_STORAGE( thread_desc, mainThread);119 KERNEL_STORAGE($thread, mainThread); 118 120 KERNEL_STORAGE(__stack_t, mainThreadCtx); 119 121 120 122 cluster * mainCluster; 121 123 processor * mainProcessor; 122 thread_desc* mainThread;124 $thread * mainThread; 123 125 124 126 extern "C" { 125 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;127 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters; 126 128 } 127 129 … … 131 133 // Global state 132 134 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = { 135 NULL, // cannot use 0p 133 136 NULL, 134 NULL,135 { 1, false, false }137 { 1, false, false }, 138 6u //this should be seeded better but due to a bug calling rdtsc doesn't work 136 139 }; 137 140 … … 139 142 // Struct to steal stack 140 143 struct current_stack_info_t { 141 __stack_t * storage; // pointer to stack object142 void * base;// base of stack143 void * limit;// stack grows towards stack limit144 void * context;// address of cfa_context_t144 __stack_t * storage; // pointer to stack object 145 void * base; // base of stack 146 void * limit; // stack grows towards stack limit 147 void * context; // address of cfa_context_t 145 148 }; 146 149 … … 161 164 // Main thread construction 162 165 163 void ?{}( coroutine_desc& this, current_stack_info_t * info) with( this ) {166 void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) { 164 167 stack.storage = info->storage; 165 168 with(*stack.storage) { … … 171 174 name = "Main Thread"; 172 175 state = Start; 173 starter = NULL;174 last = NULL;175 cancellation = NULL;176 } 177 178 void ?{}( thread_desc& this, current_stack_info_t * info) with( this ) {176 starter = 0p; 177 last = 0p; 178 cancellation = 0p; 179 } 180 181 void ?{}( $thread & this, current_stack_info_t * info) with( this ) { 179 182 state = Start; 180 183 self_cor{ info }; … … 184 187 self_mon.recursion = 1; 185 188 self_mon_p = &self_mon; 186 next = NULL;187 188 node.next = NULL;189 node.prev = NULL;189 next = 0p; 190 191 node.next = 0p; 192 node.prev = 0p; 190 193 
doregister(curr_cluster, this); 191 194 … … 205 208 } 206 209 207 static void start(processor * this); 208 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) { 210 static void * __invoke_processor(void * arg); 211 212 void ?{}(processor & this, const char name[], cluster & cltr) with( this ) { 209 213 this.name = name; 210 214 this.cltr = &cltr; 211 215 terminated{ 0 }; 216 destroyer = 0p; 212 217 do_terminate = false; 213 preemption_alarm = NULL;218 preemption_alarm = 0p; 214 219 pending_preemption = false; 215 220 runner.proc = &this; … … 217 222 idleLock{}; 218 223 219 start( &this ); 224 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this); 225 226 this.stack = __create_pthread( &this.kernel_thread, __invoke_processor, (void *)&this ); 227 228 __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this); 220 229 } 221 230 … … 231 240 } 232 241 233 pthread_join( kernel_thread, NULL ); 234 } 235 236 void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) { 242 pthread_join( kernel_thread, 0p ); 243 free( this.stack ); 244 } 245 246 void ?{}(cluster & this, const char name[], Duration preemption_rate) with( this ) { 237 247 this.name = name; 238 248 this.preemption_rate = preemption_rate; … … 254 264 // Kernel Scheduling logic 255 265 //============================================================================================= 256 static void runThread(processor * this, thread_desc * dst);257 static void finishRunning(processor * this);258 static void halt(processor * this);266 static $thread * __next_thread(cluster * this); 267 static void __run_thread(processor * this, $thread * dst); 268 static void __halt(processor * this); 259 269 260 270 //Main of the processor contexts 261 271 void main(processorCtx_t & runner) { 272 // Because of a bug, we couldn't initialized the seed on construction 273 // Do it here 274 kernelTLS.rand_seed ^= rdtscl(); 275 262 276 processor * this = runner.proc; 263 277 verify(this); … … 273 287 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); 274 288 275 thread_desc * readyThread = NULL; 276 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) 277 { 278 readyThread = nextThread( this->cltr ); 279 280 if(readyThread) 281 { 282 verify( ! kernelTLS.preemption_state.enabled ); 283 284 runThread(this, readyThread); 285 286 verify( ! kernelTLS.preemption_state.enabled ); 287 288 //Some actions need to be taken from the kernel 289 finishRunning(this); 289 $thread * readyThread = 0p; 290 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) { 291 readyThread = __next_thread( this->cltr ); 292 293 if(readyThread) { 294 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 295 /* paranoid */ verifyf( readyThread->state == Inactive || readyThread->state == Start || readyThread->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", readyThread->state, readyThread->preempted); 296 /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next ); 297 298 __run_thread(this, readyThread); 299 300 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 290 301 291 302 spin_count = 0; 292 } 293 else 294 { 303 } else { 295 304 // spin(this, &spin_count); 296 halt(this);305 __halt(this); 297 306 } 298 307 } … … 314 323 // runThread runs a thread by context switching 315 324 // from the processor coroutine to the target thread 316 static void runThread(processor * this, thread_desc * thrd_dst) { 317 coroutine_desc * proc_cor = get_coroutine(this->runner); 318 319 // Reset the terminating actions here 320 this->finish.action_code = No_Action; 325 static void __run_thread(processor * this, $thread * thrd_dst) { 326 $coroutine * proc_cor = get_coroutine(this->runner); 321 327 322 328 // Update global state 323 329 kernelTLS.this_thread = thrd_dst; 324 330 325 // set state of processor coroutine to inactive and the thread to active 326 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 327 thrd_dst->state = Active; 328 329 // set context switch to the thread that the processor is executing 330 verify( thrd_dst->context.SP ); 331 CtxSwitch( &proc_cor->context, &thrd_dst->context ); 332 // when CtxSwitch returns we are back in the processor coroutine 333 334 // set state of processor coroutine to active and the thread to inactive 335 thrd_dst->state = thrd_dst->state == Halted ? Halted : Inactive; 331 // set state of processor coroutine to inactive 332 verify(proc_cor->state == Active); 333 proc_cor->state = Inactive; 334 335 // Actually run the thread 336 RUNNING: while(true) { 337 if(unlikely(thrd_dst->preempted)) { 338 thrd_dst->preempted = __NO_PREEMPTION; 339 verify(thrd_dst->state == Active || thrd_dst->state == Rerun); 340 } else { 341 verify(thrd_dst->state == Start || thrd_dst->state == Primed || thrd_dst->state == Inactive); 342 thrd_dst->state = Active; 343 } 344 345 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 346 347 // set context switch to the thread that the processor is executing 348 verify( thrd_dst->context.SP ); 349 __cfactx_switch( &proc_cor->context, &thrd_dst->context ); 350 // when __cfactx_switch returns we are back in the processor coroutine 351 352 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 353 354 355 // We just finished running a thread, there are a few things that could have happened. 356 // 1 - Regular case : the thread has blocked and now one has scheduled it yet. 357 // 2 - Racy case : the thread has blocked but someone has already tried to schedule it. 358 // 3 - Polite Racy case : the thread has blocked, someone has already tried to schedule it, but the thread is nice and wants to go through the ready-queue any way 359 // 4 - Preempted 360 // In case 1, we may have won a race so we can't write to the state again. 361 // In case 2, we lost the race so we now own the thread. 362 // In case 3, we lost the race but can just reschedule the thread. 
363 364 if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) { 365 // The thread was preempted, reschedule it and reset the flag 366 __schedule_thread( thrd_dst ); 367 break RUNNING; 368 } 369 370 // set state of processor coroutine to active and the thread to inactive 371 static_assert(sizeof(thrd_dst->state) == sizeof(int)); 372 enum coroutine_state old_state = __atomic_exchange_n(&thrd_dst->state, Inactive, __ATOMIC_SEQ_CST); 373 switch(old_state) { 374 case Halted: 375 // The thread has halted, it should never be scheduled/run again, leave it back to Halted and move on 376 thrd_dst->state = Halted; 377 378 // We may need to wake someone up here since 379 unpark( this->destroyer ); 380 this->destroyer = 0p; 381 break RUNNING; 382 case Active: 383 // This is case 1, the regular case, nothing more is needed 384 break RUNNING; 385 case Rerun: 386 // This is case 2, the racy case, someone tried to run this thread before it finished blocking 387 // In this case, just run it again. 388 continue RUNNING; 389 default: 390 // This makes no sense, something is wrong abort 391 abort("Finished running a thread that was Inactive/Start/Primed %d\n", old_state); 392 } 393 } 394 395 // Just before returning to the processor, set the processor coroutine to active 336 396 proc_cor->state = Active; 337 397 } 338 398 339 399 // KERNEL_ONLY 340 static void returnToKernel() { 341 coroutine_desc * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 342 thread_desc * thrd_src = kernelTLS.this_thread; 343 344 // set state of current coroutine to inactive 345 thrd_src->state = thrd_src->state == Halted ? Halted : Inactive; 346 proc_cor->state = Active; 347 int local_errno = *__volatile_errno(); 348 #if defined( __i386 ) || defined( __x86_64 ) 349 __x87_store; 350 #endif 351 352 // set new coroutine that the processor is executing 353 // and context switch to it 354 verify( proc_cor->context.SP ); 355 CtxSwitch( &thrd_src->context, &proc_cor->context ); 356 357 // set state of new coroutine to active 358 proc_cor->state = proc_cor->state == Halted ? Halted : Inactive; 359 thrd_src->state = Active; 360 361 #if defined( __i386 ) || defined( __x86_64 ) 362 __x87_load; 363 #endif 364 *__volatile_errno() = local_errno; 365 } 366 367 // KERNEL_ONLY 368 // Once a thread has finished running, some of 369 // its final actions must be executed from the kernel 370 static void finishRunning(processor * this) with( this->finish ) { 371 verify( ! kernelTLS.preemption_state.enabled ); 372 choose( action_code ) { 373 case No_Action: 374 break; 375 case Release: 376 unlock( *lock ); 377 case Schedule: 378 ScheduleThread( thrd ); 379 case Release_Schedule: 380 unlock( *lock ); 381 ScheduleThread( thrd ); 382 case Release_Multi: 383 for(int i = 0; i < lock_count; i++) { 384 unlock( *locks[i] ); 385 } 386 case Release_Multi_Schedule: 387 for(int i = 0; i < lock_count; i++) { 388 unlock( *locks[i] ); 389 } 390 for(int i = 0; i < thrd_count; i++) { 391 ScheduleThread( thrds[i] ); 392 } 393 case Callback: 394 callback(); 395 default: 396 abort("KERNEL ERROR: Unexpected action to run after thread"); 397 } 400 void returnToKernel() { 401 /* paranoid */ verify( ! 
kernelTLS.preemption_state.enabled ); 402 $coroutine * proc_cor = get_coroutine(kernelTLS.this_processor->runner); 403 $thread * thrd_src = kernelTLS.this_thread; 404 405 // Run the thread on this processor 406 { 407 int local_errno = *__volatile_errno(); 408 #if defined( __i386 ) || defined( __x86_64 ) 409 __x87_store; 410 #endif 411 verify( proc_cor->context.SP ); 412 __cfactx_switch( &thrd_src->context, &proc_cor->context ); 413 #if defined( __i386 ) || defined( __x86_64 ) 414 __x87_load; 415 #endif 416 *__volatile_errno() = local_errno; 417 } 418 419 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 398 420 } 399 421 … … 402 424 // This is the entry point for processors (kernel threads) 403 425 // It effectively constructs a coroutine by stealing the pthread stack 404 static void * CtxInvokeProcessor(void * arg) {426 static void * __invoke_processor(void * arg) { 405 427 processor * proc = (processor *) arg; 406 428 kernelTLS.this_processor = proc; 407 kernelTLS.this_thread = NULL;429 kernelTLS.this_thread = 0p; 408 430 kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; 409 431 // SKULLDUGGERY: We want to create a context for the processor coroutine … … 418 440 419 441 //Set global state 420 kernelTLS.this_thread = NULL;442 kernelTLS.this_thread = 0p; 421 443 422 444 //We now have a proper context from which to schedule threads … … 434 456 __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner); 435 457 436 return NULL; 437 } 438 439 static void start(processor * this) { 440 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this); 441 442 pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this ); 443 444 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); 458 return 0p; 459 } 460 461 static void Abort( int ret, const char func[] ) { 462 if ( ret ) { // pthread routines return errno values 463 abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) ); 464 } // if 465 } // Abort 466 467 void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 468 pthread_attr_t attr; 469 470 Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute 471 472 size_t stacksize; 473 // default stack size, normally defined by shell limit 474 Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" ); 475 assert( stacksize >= PTHREAD_STACK_MIN ); 476 477 void * stack; 478 __cfaabi_dbg_debug_do( 479 stack = memalign( __page_size, stacksize + __page_size ); 480 // pthread has no mechanism to create the guard page in user supplied stack. 481 if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) { 482 abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) ); 483 } // if 484 ); 485 __cfaabi_dbg_no_debug_do( 486 stack = malloc( stacksize ); 487 ); 488 489 Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" ); 490 491 Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" ); 492 return stack; 445 493 } 446 494 447 495 // KERNEL_ONLY 448 voidkernel_first_resume( processor * this ) {449 thread_desc* src = mainThread;450 coroutine_desc* dst = get_coroutine(this->runner);496 static void __kernel_first_resume( processor * this ) { 497 $thread * src = mainThread; 498 $coroutine * dst = get_coroutine(this->runner); 451 499 452 500 verify( ! 
kernelTLS.preemption_state.enabled ); 453 501 502 kernelTLS.this_thread->curr_cor = dst; 454 503 __stack_prepare( &dst->stack, 65000 ); 455 CtxStart(&this->runner, CtxInvokeCoroutine);504 __cfactx_start(main, dst, this->runner, __cfactx_invoke_coroutine); 456 505 457 506 verify( ! kernelTLS.preemption_state.enabled ); … … 465 514 // context switch to specified coroutine 466 515 verify( dst->context.SP ); 467 CtxSwitch( &src->context, &dst->context ); 468 // when CtxSwitch returns we are back in the src coroutine 516 __cfactx_switch( &src->context, &dst->context ); 517 // when __cfactx_switch returns we are back in the src coroutine 518 519 mainThread->curr_cor = &mainThread->self_cor; 469 520 470 521 // set state of new coroutine to active … … 475 526 476 527 // KERNEL_ONLY 477 voidkernel_last_resume( processor * this ) {478 coroutine_desc* src = &mainThread->self_cor;479 coroutine_desc* dst = get_coroutine(this->runner);528 static void __kernel_last_resume( processor * this ) { 529 $coroutine * src = &mainThread->self_cor; 530 $coroutine * dst = get_coroutine(this->runner); 480 531 481 532 verify( ! kernelTLS.preemption_state.enabled ); … … 484 535 485 536 // context switch to the processor 486 CtxSwitch( &src->context, &dst->context );537 __cfactx_switch( &src->context, &dst->context ); 487 538 } 488 539 489 540 //----------------------------------------------------------------------------- 490 541 // Scheduler routines 491 492 542 // KERNEL ONLY 493 void ScheduleThread( thread_desc * thrd ) { 494 verify( thrd ); 495 verify( thrd->state != Halted ); 496 497 verify( ! kernelTLS.preemption_state.enabled ); 498 499 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next ); 500 501 with( *thrd->curr_cluster ) { 502 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 503 bool was_empty = !(ready_queue != 0); 504 append( ready_queue, thrd ); 505 unlock( ready_queue_lock ); 506 507 if(was_empty) { 508 lock (proc_list_lock __cfaabi_dbg_ctx2); 509 if(idles) { 510 wake_fast(idles.head); 511 } 512 unlock (proc_list_lock); 543 void __schedule_thread( $thread * thrd ) with( *thrd->curr_cluster ) { 544 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 545 /* paranoid */ #if defined( __CFA_WITH_VERIFY__ ) 546 /* paranoid */ if( thrd->state == Inactive || thrd->state == Start ) assertf( thrd->preempted == __NO_PREEMPTION, 547 "Error inactive thread marked as preempted, state %d, preemption %d\n", thrd->state, thrd->preempted ); 548 /* paranoid */ if( thrd->preempted != __NO_PREEMPTION ) assertf(thrd->state == Active || thrd->state == Rerun, 549 "Error preempted thread marked as not currently running, state %d, preemption %d\n", thrd->state, thrd->preempted ); 550 /* paranoid */ #endif 551 /* paranoid */ verifyf( thrd->next == 0p, "Expected null got %p", thrd->next ); 552 553 lock ( ready_queue_lock __cfaabi_dbg_ctx2 ); 554 bool was_empty = !(ready_queue != 0); 555 append( ready_queue, thrd ); 556 unlock( ready_queue_lock ); 557 558 if(was_empty) { 559 lock (proc_list_lock __cfaabi_dbg_ctx2); 560 if(idles) { 561 wake_fast(idles.head); 513 562 } 514 else if( struct processor * idle = idles.head ) {515 wake_fast(idle);516 }517 518 } 519 520 verify( ! kernelTLS.preemption_state.enabled );563 unlock (proc_list_lock); 564 } 565 else if( struct processor * idle = idles.head ) { 566 wake_fast(idle); 567 } 568 569 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 521 570 } 522 571 523 572 // KERNEL ONLY 524 thread_desc * nextThread(cluster * this) with( *this ) { 525 verify( ! 
kernelTLS.preemption_state.enabled ); 573 static $thread * __next_thread(cluster * this) with( *this ) { 574 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 575 526 576 lock( ready_queue_lock __cfaabi_dbg_ctx2 ); 527 thread_desc* head = pop_head( ready_queue );577 $thread * head = pop_head( ready_queue ); 528 578 unlock( ready_queue_lock ); 529 verify( ! kernelTLS.preemption_state.enabled ); 579 580 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 530 581 return head; 531 582 } 532 583 533 void BlockInternal() { 584 void unpark( $thread * thrd ) { 585 if( !thrd ) return; 586 534 587 disable_interrupts(); 535 verify( ! kernelTLS.preemption_state.enabled ); 588 static_assert(sizeof(thrd->state) == sizeof(int)); 589 enum coroutine_state old_state = __atomic_exchange_n(&thrd->state, Rerun, __ATOMIC_SEQ_CST); 590 switch(old_state) { 591 case Active: 592 // Wake won the race, the thread will reschedule/rerun itself 593 break; 594 case Inactive: 595 /* paranoid */ verify( ! thrd->preempted != __NO_PREEMPTION ); 596 597 // Wake lost the race, 598 thrd->state = Inactive; 599 __schedule_thread( thrd ); 600 break; 601 case Rerun: 602 abort("More than one thread attempted to schedule thread %p\n", thrd); 603 break; 604 case Halted: 605 case Start: 606 case Primed: 607 default: 608 // This makes no sense, something is wrong abort 609 abort(); 610 } 611 enable_interrupts( __cfaabi_dbg_ctx ); 612 } 613 614 void park( void ) { 615 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 616 disable_interrupts(); 617 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 618 /* paranoid */ verify( kernelTLS.this_thread->preempted == __NO_PREEMPTION ); 619 536 620 returnToKernel(); 537 verify( ! kernelTLS.preemption_state.enabled ); 621 622 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 538 623 enable_interrupts( __cfaabi_dbg_ctx ); 539 } 540 541 void BlockInternal( __spinlock_t * lock ) { 624 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 625 626 } 627 628 // KERNEL ONLY 629 void __leave_thread() { 630 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 631 returnToKernel(); 632 abort(); 633 } 634 635 // KERNEL ONLY 636 bool force_yield( __Preemption_Reason reason ) { 637 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 542 638 disable_interrupts(); 543 with( *kernelTLS.this_processor ) { 544 finish.action_code = Release; 545 finish.lock = lock; 546 } 547 548 verify( ! kernelTLS.preemption_state.enabled ); 549 returnToKernel(); 550 verify( ! kernelTLS.preemption_state.enabled ); 551 552 enable_interrupts( __cfaabi_dbg_ctx ); 553 } 554 555 void BlockInternal( thread_desc * thrd ) { 556 disable_interrupts(); 557 with( * kernelTLS.this_processor ) { 558 finish.action_code = Schedule; 559 finish.thrd = thrd; 560 } 561 562 verify( ! kernelTLS.preemption_state.enabled ); 563 returnToKernel(); 564 verify( ! kernelTLS.preemption_state.enabled ); 565 566 enable_interrupts( __cfaabi_dbg_ctx ); 567 } 568 569 void BlockInternal( __spinlock_t * lock, thread_desc * thrd ) { 570 assert(thrd); 571 disable_interrupts(); 572 with( * kernelTLS.this_processor ) { 573 finish.action_code = Release_Schedule; 574 finish.lock = lock; 575 finish.thrd = thrd; 576 } 577 578 verify( ! kernelTLS.preemption_state.enabled ); 579 returnToKernel(); 580 verify( ! 
kernelTLS.preemption_state.enabled ); 581 582 enable_interrupts( __cfaabi_dbg_ctx ); 583 } 584 585 void BlockInternal(__spinlock_t * locks [], unsigned short count) { 586 disable_interrupts(); 587 with( * kernelTLS.this_processor ) { 588 finish.action_code = Release_Multi; 589 finish.locks = locks; 590 finish.lock_count = count; 591 } 592 593 verify( ! kernelTLS.preemption_state.enabled ); 594 returnToKernel(); 595 verify( ! kernelTLS.preemption_state.enabled ); 596 597 enable_interrupts( __cfaabi_dbg_ctx ); 598 } 599 600 void BlockInternal(__spinlock_t * locks [], unsigned short lock_count, thread_desc * thrds [], unsigned short thrd_count) { 601 disable_interrupts(); 602 with( *kernelTLS.this_processor ) { 603 finish.action_code = Release_Multi_Schedule; 604 finish.locks = locks; 605 finish.lock_count = lock_count; 606 finish.thrds = thrds; 607 finish.thrd_count = thrd_count; 608 } 609 610 verify( ! kernelTLS.preemption_state.enabled ); 611 returnToKernel(); 612 verify( ! kernelTLS.preemption_state.enabled ); 613 614 enable_interrupts( __cfaabi_dbg_ctx ); 615 } 616 617 void BlockInternal(__finish_callback_fptr_t callback) { 618 disable_interrupts(); 619 with( *kernelTLS.this_processor ) { 620 finish.action_code = Callback; 621 finish.callback = callback; 622 } 623 624 verify( ! kernelTLS.preemption_state.enabled ); 625 returnToKernel(); 626 verify( ! kernelTLS.preemption_state.enabled ); 627 628 enable_interrupts( __cfaabi_dbg_ctx ); 629 } 630 631 // KERNEL ONLY 632 void LeaveThread(__spinlock_t * lock, thread_desc * thrd) { 633 verify( ! kernelTLS.preemption_state.enabled ); 634 with( * kernelTLS.this_processor ) { 635 finish.action_code = thrd ? Release_Schedule : Release; 636 finish.lock = lock; 637 finish.thrd = thrd; 638 } 639 640 returnToKernel(); 639 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 640 641 $thread * thrd = kernelTLS.this_thread; 642 /* paranoid */ verify(thrd->state == Active || thrd->state == Rerun); 643 644 // SKULLDUGGERY: It is possible that we are preempting this thread just before 645 // it was going to park itself. If that is the case and it is already using the 646 // intrusive fields then we can't use them to preempt the thread 647 // If that is the case, abandon the preemption. 648 bool preempted = false; 649 if(thrd->next == 0p) { 650 preempted = true; 651 thrd->preempted = reason; 652 returnToKernel(); 653 } 654 655 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 656 enable_interrupts_noPoll(); 657 /* paranoid */ verify( kernelTLS.preemption_state.enabled ); 658 659 return preempted; 641 660 } 642 661 … … 646 665 //----------------------------------------------------------------------------- 647 666 // Kernel boot procedures 648 static void kernel_startup(void) {667 static void __kernel_startup(void) { 649 668 verify( ! 
kernelTLS.preemption_state.enabled ); 650 669 __cfaabi_dbg_print_safe("Kernel : Starting\n"); … … 664 683 // SKULLDUGGERY: the mainThread steals the process main thread 665 684 // which will then be scheduled by the mainProcessor normally 666 mainThread = ( thread_desc*)&storage_mainThread;685 mainThread = ($thread *)&storage_mainThread; 667 686 current_stack_info_t info; 668 687 info.storage = (__stack_t*)&storage_mainThreadCtx; … … 676 695 void ?{}(processorCtx_t & this, processor * proc) { 677 696 (this.__cor){ "Processor" }; 678 this.__cor.starter = NULL;697 this.__cor.starter = 0p; 679 698 this.proc = proc; 680 699 } … … 685 704 terminated{ 0 }; 686 705 do_terminate = false; 687 preemption_alarm = NULL;706 preemption_alarm = 0p; 688 707 pending_preemption = false; 689 708 kernel_thread = pthread_self(); … … 707 726 // Add the main thread to the ready queue 708 727 // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread 709 ScheduleThread(mainThread);728 __schedule_thread(mainThread); 710 729 711 730 // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX 712 // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that731 // context. Hence, the main thread does not begin through __cfactx_invoke_thread, like all other threads. The trick here is that 713 732 // mainThread is on the ready queue when this call is made. 714 kernel_first_resume( kernelTLS.this_processor );733 __kernel_first_resume( kernelTLS.this_processor ); 715 734 716 735 … … 724 743 } 725 744 726 static void kernel_shutdown(void) {745 static void __kernel_shutdown(void) { 727 746 __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n"); 728 747 … … 735 754 // which is currently here 736 755 __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE); 737 kernel_last_resume( kernelTLS.this_processor );756 __kernel_last_resume( kernelTLS.this_processor ); 738 757 mainThread->self_cor.state = Halted; 739 758 … … 761 780 // Kernel Quiescing 762 781 //============================================================================================= 763 static void halt(processor * this) with( *this ) {782 static void __halt(processor * this) with( *this ) { 764 783 // verify( ! 
__atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) ); 765 784 … … 803 822 sigemptyset( &mask ); 804 823 sigaddset( &mask, SIGALRM ); // block SIGALRM signals 805 sigsuspend( &mask ); // block the processor to prevent further damage during abort 806 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 824 sigaddset( &mask, SIGUSR1 ); // block SIGALRM signals 825 sigsuspend( &mask ); // block the processor to prevent further damage during abort 826 _exit( EXIT_FAILURE ); // if processor unblocks before it is killed, terminate it 807 827 } 808 828 else { … … 815 835 816 836 void kernel_abort_msg( void * kernel_data, char * abort_text, int abort_text_size ) { 817 thread_desc* thrd = kernel_data;837 $thread * thrd = kernel_data; 818 838 819 839 if(thrd) { 820 840 int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd ); 821 __cfaabi_ dbg_bits_write(abort_text, len );841 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 822 842 823 843 if ( &thrd->self_cor != thrd->curr_cor ) { 824 844 len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor ); 825 __cfaabi_ dbg_bits_write(abort_text, len );845 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 826 846 } 827 847 else { 828 __cfaabi_ dbg_bits_write(".\n", 2 );848 __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 ); 829 849 } 830 850 } 831 851 else { 832 852 int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" ); 833 __cfaabi_ dbg_bits_write(abort_text, len );853 __cfaabi_bits_write( STDERR_FILENO, abort_text, len ); 834 854 } 835 855 } … … 842 862 843 863 extern "C" { 844 void __cfaabi_ dbg_bits_acquire() {864 void __cfaabi_bits_acquire() { 845 865 lock( kernel_debug_lock __cfaabi_dbg_ctx2 ); 846 866 } 847 867 848 void __cfaabi_ dbg_bits_release() {868 void __cfaabi_bits_release() { 849 869 unlock( kernel_debug_lock ); 850 870 } … … 871 891 872 892 // atomically release spin lock and block 873 BlockInternal( &lock ); 893 unlock( lock ); 894 park(); 874 895 } 875 896 else { … … 879 900 880 901 void V(semaphore & this) with( this ) { 881 thread_desc * thrd = NULL;902 $thread * thrd = 0p; 882 903 lock( lock __cfaabi_dbg_ctx2 ); 883 904 count += 1; … … 890 911 891 912 // make new owner 892 WakeThread( thrd );913 unpark( thrd ); 893 914 } 894 915 … … 907 928 } 908 929 909 void doregister( cluster * cltr, thread_desc& thrd ) {930 void doregister( cluster * cltr, $thread & thrd ) { 910 931 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 911 932 cltr->nthreads += 1; … … 914 935 } 915 936 916 void unregister( cluster * cltr, thread_desc& thrd ) {937 void unregister( cluster * cltr, $thread & thrd ) { 917 938 lock (cltr->thread_list_lock __cfaabi_dbg_ctx2); 918 939 remove(cltr->threads, thrd ); … … 939 960 __cfaabi_dbg_debug_do( 940 961 extern "C" { 941 void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {962 void __cfaabi_dbg_record(__spinlock_t & this, const char prev_name[]) { 942 963 this.prev_name = prev_name; 943 964 this.prev_thrd = kernelTLS.this_thread; … … 948 969 //----------------------------------------------------------------------------- 949 970 // Debug 950 bool threading_enabled(void) {971 bool threading_enabled(void) __attribute__((const)) { 951 972 return true; 952 973 } -
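The largest semantic change in kernel.cfa is the replacement of the BlockInternal/ScheduleThread variants with park/unpark: the processor that just ran a thread and the waker each perform an atomic exchange on the thread's state (Active, Inactive, Rerun) and act on the value they displaced, which is how the block-versus-wake race described in the comments above is resolved. Below is a minimal C11 sketch of that two-sided exchange with hypothetical helper names; it is not the CFA scheduler itself.

#include <stdatomic.h>
#include <stdio.h>

enum state { Inactive, Active, Rerun };

struct thread { _Atomic int state; };

/* processor side: the thread just blocked and control returned to the kernel */
static int finish_running(struct thread *t) {
	int old = atomic_exchange(&t->state, Inactive);
	if (old == Active) return 0;        /* regular case: no one woke it, leave it parked */
	if (old == Rerun)  return 1;        /* racy case: a waker beat us, run it again      */
	return -1;                          /* anything else would be an error               */
}

/* waker side: the unpark operation */
static void wake(struct thread *t) {
	int old = atomic_exchange(&t->state, Rerun);
	if (old == Inactive) {              /* wake lost the race: the thread is parked,     */
		t->state = Inactive;            /* so put the state back and hand the thread     */
		puts("rescheduling parked thread");  /* to the ready queue                       */
	}
	/* old == Active: wake won the race; the running thread sees Rerun and reruns itself */
}

int main(void) {
	struct thread t = { Active };       /* waker arrives while the thread is still running */
	wake(&t);
	printf("finish_running -> %d (1 means rerun)\n", finish_running(&t));
	return 0;
}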
libcfa/src/concurrency/kernel.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Jun 22 11:39:17 201913 // Update Count : 1612 // Last Modified On : Tue Feb 4 12:29:26 2020 13 // Update Count : 22 14 14 // 15 15 … … 20 20 #include "invoke.h" 21 21 #include "time_t.hfa" 22 #include "coroutine.hfa" 22 23 23 24 extern "C" { … … 31 32 __spinlock_t lock; 32 33 int count; 33 __queue_t( thread_desc) waiting;34 __queue_t($thread) waiting; 34 35 }; 35 36 … … 43 44 // Processor 44 45 extern struct cluster * mainCluster; 45 46 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };47 48 typedef void (*__finish_callback_fptr_t)(void);49 50 //TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)51 struct FinishAction {52 FinishOpCode action_code;53 /*54 // Union of possible actions55 union {56 // Option 1 : locks and threads57 struct {58 // 1 thread or N thread59 union {60 thread_desc * thrd;61 struct {62 thread_desc ** thrds;63 unsigned short thrd_count;64 };65 };66 // 1 lock or N lock67 union {68 __spinlock_t * lock;69 struct {70 __spinlock_t ** locks;71 unsigned short lock_count;72 };73 };74 };75 // Option 2 : action pointer76 __finish_callback_fptr_t callback;77 };78 /*/79 thread_desc * thrd;80 thread_desc ** thrds;81 unsigned short thrd_count;82 __spinlock_t * lock;83 __spinlock_t ** locks;84 unsigned short lock_count;85 __finish_callback_fptr_t callback;86 //*/87 };88 static inline void ?{}(FinishAction & this) {89 this.action_code = No_Action;90 this.thrd = NULL;91 this.lock = NULL;92 }93 static inline void ^?{}(FinishAction &) {}94 46 95 47 // Processor … … 115 67 // RunThread data 116 68 // Action to do after a thread is ran 117 struct FinishAction finish;69 $thread * destroyer; 118 70 119 71 // Preemption data … … 134 86 semaphore terminated; 135 87 88 // pthread Stack 89 void * stack; 90 136 91 // Link lists fields 137 92 struct __dbg_node_proc { … … 146 101 }; 147 102 148 void ?{}(processor & this, const char * name, struct cluster & cltr);103 void ?{}(processor & this, const char name[], struct cluster & cltr); 149 104 void ^?{}(processor & this); 150 105 151 106 static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; } 152 107 static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; } 153 static inline void ?{}(processor & this, const char * name) { this{name, *mainCluster }; }108 static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; } 154 109 155 static inline [processor *&, processor *& ] __get( processor & this ) { 156 return this.node.[next, prev]; 157 } 110 static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; } 158 111 159 112 //----------------------------------------------------------------------------- … … 164 117 165 118 // Ready queue for threads 166 __queue_t( thread_desc) ready_queue;119 __queue_t($thread) ready_queue; 167 120 168 121 // Name of the cluster … … 180 133 // List of threads 181 134 __spinlock_t thread_list_lock; 182 __dllist_t(struct thread_desc) threads;135 __dllist_t(struct $thread) threads; 183 136 unsigned int nthreads; 184 137 … … 191 144 extern Duration default_preemption(); 192 145 193 void ?{} (cluster & this, const char * name, Duration preemption_rate);146 void ?{} (cluster & this, const char name[], Duration preemption_rate); 
194 147 void ^?{}(cluster & this); 195 148 196 149 static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption()}; } 197 150 static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; } 198 static inline void ?{} (cluster & this, const char * name) { this{name, default_preemption()}; }151 static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption()}; } 199 152 200 static inline [cluster *&, cluster *& ] __get( cluster & this ) { 201 return this.node.[next, prev]; 202 } 153 static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; } 203 154 204 155 static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE -
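Note: kernel.hfa now exposes name-and-cluster constructors for both processor and cluster. A hypothetical usage sketch (the names and the 10`ms preemption rate are illustrative, not from this changeset); in CFA, declaring the objects is what starts, and at scope exit stops, the underlying kernel threads:

	cluster   io_cluster = { "io", 10`ms };             // name + preemption rate
	processor io_proc0   = { "io-proc-0", io_cluster }; // kernel thread serving io_cluster
	processor io_proc1   = { "io-proc-1", io_cluster };
	// threads created on io_cluster are scheduled by these two processors;
	// the destructors shut the processors down when the declarations go out of scope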
libcfa/src/concurrency/kernel_private.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Feb 13 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Mar 29 14:06:40 201813 // Update Count : 312 // Last Modified On : Sat Nov 30 19:25:02 2019 13 // Update Count : 8 14 14 // 15 15 … … 31 31 } 32 32 33 void ScheduleThread( thread_desc * ); 34 static inline void WakeThread( thread_desc * thrd ) { 35 if( !thrd ) return; 36 37 disable_interrupts(); 38 ScheduleThread( thrd ); 39 enable_interrupts( __cfaabi_dbg_ctx ); 40 } 41 thread_desc * nextThread(cluster * this); 33 void __schedule_thread( $thread * ) __attribute__((nonnull (1))); 42 34 43 35 //Block current thread and release/wake-up the following resources 44 void BlockInternal(void); 45 void BlockInternal(__spinlock_t * lock); 46 void BlockInternal(thread_desc * thrd); 47 void BlockInternal(__spinlock_t * lock, thread_desc * thrd); 48 void BlockInternal(__spinlock_t * locks [], unsigned short count); 49 void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count); 50 void BlockInternal(__finish_callback_fptr_t callback); 51 void LeaveThread(__spinlock_t * lock, thread_desc * thrd); 36 void __leave_thread() __attribute__((noreturn)); 52 37 53 38 //----------------------------------------------------------------------------- 54 39 // Processor 55 40 void main(processorCtx_t *); 41 42 void * __create_pthread( pthread_t *, void * (*)(void *), void * ); 56 43 57 44 static inline void wake_fast(processor * this) { … … 84 71 // Threads 85 72 extern "C" { 86 forall(dtype T | is_thread(T)) 87 void CtxInvokeThread(T * this); 73 void __cfactx_invoke_thread(void (*main)(void *), void * this); 88 74 } 89 75 90 extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);91 92 76 __cfaabi_dbg_debug_do( 93 extern void __cfaabi_dbg_thread_register ( thread_desc* thrd );94 extern void __cfaabi_dbg_thread_unregister( thread_desc* thrd );77 extern void __cfaabi_dbg_thread_register ( $thread * thrd ); 78 extern void __cfaabi_dbg_thread_unregister( $thread * thrd ); 95 79 ) 96 80 … … 99 83 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)] 100 84 85 static inline uint32_t __tls_rand() { 86 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6; 87 kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21; 88 kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7; 89 return kernelTLS.rand_seed; 90 } 91 101 92 102 93 void doregister( struct cluster & cltr ); 103 94 void unregister( struct cluster & cltr ); 104 95 105 void doregister( struct cluster * cltr, struct thread_desc& thrd );106 void unregister( struct cluster * cltr, struct thread_desc& thrd );96 void doregister( struct cluster * cltr, struct $thread & thrd ); 97 void unregister( struct cluster * cltr, struct $thread & thrd ); 107 98 108 99 void doregister( struct cluster * cltr, struct processor * proc ); -
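Note: __tls_rand above is a per-processor xorshift step over kernelTLS.rand_seed. The same shift sequence (6, 21, 7) restated as a stand-alone C helper; an xorshift state must be seeded non-zero, otherwise it stays at zero forever:

	#include <stdint.h>

	// One xorshift step with the shift constants used above; the caller stores the
	// returned value back as the next seed.
	static uint32_t xorshift_step( uint32_t s ) {
		s ^= s << 6;
		s ^= s >> 21;
		s ^= s << 7;
		return s;
	}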
libcfa/src/concurrency/monitor.cfa
r9fb8f01 r3d5701e 5 5 // file "LICENCE" distributed with Cforall. 6 6 // 7 // monitor_desc.c --7 // $monitor.c -- 8 8 // 9 9 // Author : Thierry Delisle 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Mar 30 14:30:26 201813 // Update Count : 912 // Last Modified On : Wed Dec 4 07:55:14 2019 13 // Update Count : 10 14 14 // 15 15 … … 27 27 //----------------------------------------------------------------------------- 28 28 // Forward declarations 29 static inline void set_owner ( monitor_desc * this, thread_desc* owner );30 static inline void set_owner ( monitor_desc * storage [], __lock_size_t count, thread_desc* owner );31 static inline void set_mask ( monitor_desc* storage [], __lock_size_t count, const __waitfor_mask_t & mask );32 static inline void reset_mask( monitor_desc* this );33 34 static inline thread_desc * next_thread( monitor_desc* this );35 static inline bool is_accepted( monitor_desc* this, const __monitor_group_t & monitors );29 static inline void __set_owner ( $monitor * this, $thread * owner ); 30 static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner ); 31 static inline void set_mask ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ); 32 static inline void reset_mask( $monitor * this ); 33 34 static inline $thread * next_thread( $monitor * this ); 35 static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors ); 36 36 37 37 static inline void lock_all ( __spinlock_t * locks [], __lock_size_t count ); 38 static inline void lock_all ( monitor_desc* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );38 static inline void lock_all ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ); 39 39 static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count ); 40 static inline void unlock_all( monitor_desc* locks [], __lock_size_t count );41 42 static inline void save ( monitor_desc* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );43 static inline void restore( monitor_desc* ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );44 45 static inline void init ( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );46 static inline void init_push( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );47 48 static inline thread_desc* check_condition ( __condition_criterion_t * );40 static inline void unlock_all( $monitor * locks [], __lock_size_t count ); 41 42 static inline void save ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] ); 43 static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] ); 44 45 static inline void init ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 46 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ); 47 48 static inline $thread * check_condition ( __condition_criterion_t * ); 49 49 static inline void brand_condition ( condition & ); 50 
static inline [ thread_desc *, int] search_entry_queue( const __waitfor_mask_t &, monitor_desc* monitors [], __lock_size_t count );50 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count ); 51 51 52 52 forall(dtype T | sized( T )) 53 53 static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val ); 54 54 static inline __lock_size_t count_max ( const __waitfor_mask_t & mask ); 55 static inline __lock_size_t aggregate ( monitor_desc* storage [], const __waitfor_mask_t & mask );55 static inline __lock_size_t aggregate ( $monitor * storage [], const __waitfor_mask_t & mask ); 56 56 57 57 //----------------------------------------------------------------------------- … … 68 68 69 69 #define monitor_ctx( mons, cnt ) /* Define that create the necessary struct for internal/external scheduling operations */ \ 70 monitor_desc** monitors = mons; /* Save the targeted monitors */ \70 $monitor ** monitors = mons; /* Save the targeted monitors */ \ 71 71 __lock_size_t count = cnt; /* Save the count to a local variable */ \ 72 72 unsigned int recursions[ count ]; /* Save the current recursion levels to restore them later */ \ … … 80 80 //----------------------------------------------------------------------------- 81 81 // Enter/Leave routines 82 83 84 extern "C" { 85 // Enter single monitor 86 static void __enter_monitor_desc( monitor_desc * this, const __monitor_group_t & group ) { 87 // Lock the monitor spinlock 88 lock( this->lock __cfaabi_dbg_ctx2 ); 89 // Interrupts disable inside critical section 90 thread_desc * thrd = kernelTLS.this_thread; 91 92 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); 93 94 if( !this->owner ) { 95 // No one has the monitor, just take it 96 set_owner( this, thrd ); 97 98 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 99 } 100 else if( this->owner == thrd) { 101 // We already have the monitor, just note how many times we took it 102 this->recursion += 1; 103 104 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 105 } 106 else if( is_accepted( this, group) ) { 107 // Some one was waiting for us, enter 108 set_owner( this, thrd ); 109 110 // Reset mask 111 reset_mask( this ); 112 113 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 114 } 115 else { 116 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 117 118 // Some one else has the monitor, wait in line for it 119 append( this->entry_queue, thrd ); 120 121 BlockInternal( &this->lock ); 122 123 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 124 125 // BlockInternal will unlock spinlock, no need to unlock ourselves 126 return; 127 } 82 // Enter single monitor 83 static void __enter( $monitor * this, const __monitor_group_t & group ) { 84 // Lock the monitor spinlock 85 lock( this->lock __cfaabi_dbg_ctx2 ); 86 // Interrupts disable inside critical section 87 $thread * thrd = kernelTLS.this_thread; 88 89 __cfaabi_dbg_print_safe( "Kernel : %10p Entering mon %p (%p)\n", thrd, this, this->owner); 90 91 if( !this->owner ) { 92 // No one has the monitor, just take it 93 __set_owner( this, thrd ); 94 95 __cfaabi_dbg_print_safe( "Kernel : mon is free \n" ); 96 } 97 else if( this->owner == thrd) { 98 // We already have the monitor, just note how many times we took it 99 this->recursion += 1; 100 101 __cfaabi_dbg_print_safe( "Kernel : mon already owned \n" ); 102 } 103 else if( is_accepted( this, group) ) { 104 // Some one was waiting for us, enter 105 
__set_owner( this, thrd ); 106 107 // Reset mask 108 reset_mask( this ); 109 110 __cfaabi_dbg_print_safe( "Kernel : mon accepts \n" ); 111 } 112 else { 113 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 114 115 // Some one else has the monitor, wait in line for it 116 /* paranoid */ verify( thrd->next == 0p ); 117 append( this->entry_queue, thrd ); 118 /* paranoid */ verify( thrd->next == 1p ); 119 120 unlock( this->lock ); 121 park(); 128 122 129 123 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 130 124 131 // Release the lock and leave 125 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 126 return; 127 } 128 129 __cfaabi_dbg_print_safe( "Kernel : %10p Entered mon %p\n", thrd, this); 130 131 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 132 /* paranoid */ verify( this->lock.lock ); 133 134 // Release the lock and leave 135 unlock( this->lock ); 136 return; 137 } 138 139 static void __dtor_enter( $monitor * this, fptr_t func ) { 140 // Lock the monitor spinlock 141 lock( this->lock __cfaabi_dbg_ctx2 ); 142 // Interrupts disable inside critical section 143 $thread * thrd = kernelTLS.this_thread; 144 145 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); 146 147 148 if( !this->owner ) { 149 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 150 151 // No one has the monitor, just take it 152 __set_owner( this, thrd ); 153 154 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 155 132 156 unlock( this->lock ); 133 157 return; 134 158 } 135 136 static void __enter_monitor_dtor( monitor_desc * this, fptr_t func ) { 137 // Lock the monitor spinlock 138 lock( this->lock __cfaabi_dbg_ctx2 ); 139 // Interrupts disable inside critical section 140 thread_desc * thrd = kernelTLS.this_thread; 141 142 __cfaabi_dbg_print_safe( "Kernel : %10p Entering dtor for mon %p (%p)\n", thrd, this, this->owner); 143 144 145 if( !this->owner ) { 146 __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this); 147 148 // No one has the monitor, just take it 149 set_owner( this, thrd ); 150 151 unlock( this->lock ); 152 return; 159 else if( this->owner == thrd) { 160 // We already have the monitor... but where about to destroy it so the nesting will fail 161 // Abort! 
162 abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd ); 163 } 164 165 __lock_size_t count = 1; 166 $monitor ** monitors = &this; 167 __monitor_group_t group = { &this, 1, func }; 168 if( is_accepted( this, group) ) { 169 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 170 171 // Wake the thread that is waiting for this 172 __condition_criterion_t * urgent = pop( this->signal_stack ); 173 /* paranoid */ verify( urgent ); 174 175 // Reset mask 176 reset_mask( this ); 177 178 // Create the node specific to this wait operation 179 wait_ctx_primed( thrd, 0 ) 180 181 // Some one else has the monitor, wait for him to finish and then run 182 unlock( this->lock ); 183 184 // Release the next thread 185 /* paranoid */ verifyf( urgent->owner->waiting_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 186 unpark( urgent->owner->waiting_thread ); 187 188 // Park current thread waiting 189 park(); 190 191 // Some one was waiting for us, enter 192 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 193 } 194 else { 195 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 196 197 wait_ctx( thrd, 0 ) 198 this->dtor_node = &waiter; 199 200 // Some one else has the monitor, wait in line for it 201 /* paranoid */ verify( thrd->next == 0p ); 202 append( this->entry_queue, thrd ); 203 /* paranoid */ verify( thrd->next == 1p ); 204 unlock( this->lock ); 205 206 // Park current thread waiting 207 park(); 208 209 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 210 return; 211 } 212 213 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 214 215 } 216 217 // Leave single monitor 218 void __leave( $monitor * this ) { 219 // Lock the monitor spinlock 220 lock( this->lock __cfaabi_dbg_ctx2 ); 221 222 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 223 224 /* paranoid */ verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 225 226 // Leaving a recursion level, decrement the counter 227 this->recursion -= 1; 228 229 // If we haven't left the last level of recursion 230 // it means we don't need to do anything 231 if( this->recursion != 0) { 232 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 233 unlock( this->lock ); 234 return; 235 } 236 237 // Get the next thread, will be null on low contention monitor 238 $thread * new_owner = next_thread( this ); 239 240 // Check the new owner is consistent with who we wake-up 241 // new_owner might be null even if someone owns the monitor when the owner is still waiting for another monitor 242 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 243 244 // We can now let other threads in safely 245 unlock( this->lock ); 246 247 //We need to wake-up the thread 248 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 249 unpark( new_owner ); 250 } 251 252 // Leave single monitor for the last time 253 
void __dtor_leave( $monitor * this ) { 254 __cfaabi_dbg_debug_do( 255 if( TL_GET( this_thread ) != this->owner ) { 256 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner); 153 257 } 154 else if( this->owner == thrd) { 155 // We already have the monitor... but where about to destroy it so the nesting will fail 156 // Abort! 157 abort( "Attempt to destroy monitor %p by thread \"%.256s\" (%p) in nested mutex.", this, thrd->self_cor.name, thrd ); 258 if( this->recursion != 1 ) { 259 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 158 260 } 159 160 __lock_size_t count = 1; 161 monitor_desc ** monitors = &this; 162 __monitor_group_t group = { &this, 1, func }; 163 if( is_accepted( this, group) ) { 164 __cfaabi_dbg_print_safe( "Kernel : mon accepts dtor, block and signal it \n" ); 165 166 // Wake the thread that is waiting for this 167 __condition_criterion_t * urgent = pop( this->signal_stack ); 168 verify( urgent ); 169 170 // Reset mask 171 reset_mask( this ); 172 173 // Create the node specific to this wait operation 174 wait_ctx_primed( thrd, 0 ) 175 176 // Some one else has the monitor, wait for him to finish and then run 177 BlockInternal( &this->lock, urgent->owner->waiting_thread ); 178 179 // Some one was waiting for us, enter 180 set_owner( this, thrd ); 181 } 182 else { 183 __cfaabi_dbg_print_safe( "Kernel : blocking \n" ); 184 185 wait_ctx( thrd, 0 ) 186 this->dtor_node = &waiter; 187 188 // Some one else has the monitor, wait in line for it 189 append( this->entry_queue, thrd ); 190 BlockInternal( &this->lock ); 191 192 // BlockInternal will unlock spinlock, no need to unlock ourselves 193 return; 194 } 195 196 __cfaabi_dbg_print_safe( "Kernel : Destroying %p\n", this); 197 198 } 199 200 // Leave single monitor 201 void __leave_monitor_desc( monitor_desc * this ) { 202 // Lock the monitor spinlock 203 lock( this->lock __cfaabi_dbg_ctx2 ); 204 205 __cfaabi_dbg_print_safe( "Kernel : %10p Leaving mon %p (%p)\n", kernelTLS.this_thread, this, this->owner); 206 207 verifyf( kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 208 209 // Leaving a recursion level, decrement the counter 210 this->recursion -= 1; 211 212 // If we haven't left the last level of recursion 213 // it means we don't need to do anything 214 if( this->recursion != 0) { 215 __cfaabi_dbg_print_safe( "Kernel : recursion still %d\n", this->recursion); 216 unlock( this->lock ); 217 return; 218 } 219 220 // Get the next thread, will be null on low contention monitor 221 thread_desc * new_owner = next_thread( this ); 222 223 // We can now let other threads in safely 224 unlock( this->lock ); 225 226 //We need to wake-up the thread 227 WakeThread( new_owner ); 228 } 229 230 // Leave single monitor for the last time 231 void __leave_dtor_monitor_desc( monitor_desc * this ) { 232 __cfaabi_dbg_debug_do( 233 if( TL_GET( this_thread ) != this->owner ) { 234 abort( "Destroyed monitor %p has inconsistent owner, expected %p got %p.\n", this, TL_GET( this_thread ), this->owner); 235 } 236 if( this->recursion != 1 ) { 237 abort( "Destroyed monitor %p has %d outstanding nested calls.\n", this, this->recursion - 1); 238 } 239 ) 240 } 241 261 ) 262 } 263 264 extern "C" { 242 265 // Leave the thread monitor 243 266 // last routine called by a thread. 
244 267 // Should never return 245 void __leave_thread_monitor( thread_desc * thrd ) { 246 monitor_desc * this = &thrd->self_mon; 268 void __cfactx_thrd_leave() { 269 $thread * thrd = TL_GET( this_thread ); 270 $monitor * this = &thrd->self_mon; 247 271 248 272 // Lock the monitor now … … 251 275 disable_interrupts(); 252 276 253 thrd->s elf_cor.state = Halted;254 255 verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this );277 thrd->state = Halted; 278 279 /* paranoid */ verifyf( thrd == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", thrd, this->owner, this->recursion, this ); 256 280 257 281 // Leaving a recursion level, decrement the counter … … 263 287 264 288 // Fetch the next thread, can be null 265 thread_desc * new_owner = next_thread( this ); 266 267 // Leave the thread, this will unlock the spinlock 268 // Use leave thread instead of BlockInternal which is 269 // specialized for this case and supports null new_owner 270 LeaveThread( &this->lock, new_owner ); 289 $thread * new_owner = next_thread( this ); 290 291 // Release the monitor lock 292 unlock( this->lock ); 293 294 // Unpark the next owner if needed 295 /* paranoid */ verifyf( !new_owner || new_owner == this->owner, "Expected owner to be %p, got %p (m: %p)", new_owner, this->owner, this ); 296 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 297 /* paranoid */ verify( ! kernelTLS.this_processor->destroyer ); 298 /* paranoid */ verify( thrd->state == Halted ); 299 300 kernelTLS.this_processor->destroyer = new_owner; 301 302 // Leave the thread 303 __leave_thread(); 271 304 272 305 // Control flow should never reach here! … … 278 311 static inline void enter( __monitor_group_t monitors ) { 279 312 for( __lock_size_t i = 0; i < monitors.size; i++) { 280 __enter _monitor_desc( monitors[i], monitors );313 __enter( monitors[i], monitors ); 281 314 } 282 315 } … … 284 317 // Leave multiple monitor 285 318 // relies on the monitor array being sorted 286 static inline void leave( monitor_desc* monitors [], __lock_size_t count) {319 static inline void leave($monitor * monitors [], __lock_size_t count) { 287 320 for( __lock_size_t i = count - 1; i >= 0; i--) { 288 __leave _monitor_desc( monitors[i] );321 __leave( monitors[i] ); 289 322 } 290 323 } … … 292 325 // Ctor for monitor guard 293 326 // Sorts monitors before entering 294 void ?{}( monitor_guard_t & this, monitor_desc* m [], __lock_size_t count, fptr_t func ) {295 thread_desc* thrd = TL_GET( this_thread );327 void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) { 328 $thread * thrd = TL_GET( this_thread ); 296 329 297 330 // Store current array … … 333 366 // Ctor for monitor guard 334 367 // Sorts monitors before entering 335 void ?{}( monitor_dtor_guard_t & this, monitor_desc* m [], fptr_t func ) {368 void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func ) { 336 369 // optimization 337 thread_desc* thrd = TL_GET( this_thread );370 $thread * thrd = TL_GET( this_thread ); 338 371 339 372 // Store current array … … 346 379 (thrd->monitors){m, 1, func}; 347 380 348 __ enter_monitor_dtor( this.m, func );381 __dtor_enter( this.m, func ); 349 382 } 350 383 … … 352 385 void ^?{}( monitor_dtor_guard_t & this ) { 353 386 // Leave the monitors in order 354 __ leave_dtor_monitor_desc( this.m );387 __dtor_leave( this.m ); 355 388 356 389 // Restore thread context … … 360 393 
//----------------------------------------------------------------------------- 361 394 // Internal scheduling types 362 void ?{}(__condition_node_t & this, thread_desc* waiting_thread, __lock_size_t count, uintptr_t user_info ) {395 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) { 363 396 this.waiting_thread = waiting_thread; 364 397 this.count = count; 365 this.next = NULL;398 this.next = 0p; 366 399 this.user_info = user_info; 367 400 } … … 369 402 void ?{}(__condition_criterion_t & this ) with( this ) { 370 403 ready = false; 371 target = NULL;372 owner = NULL;373 next = NULL;374 } 375 376 void ?{}(__condition_criterion_t & this, monitor_desc* target, __condition_node_t & owner ) {404 target = 0p; 405 owner = 0p; 406 next = 0p; 407 } 408 409 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) { 377 410 this.ready = false; 378 411 this.target = target; 379 412 this.owner = &owner; 380 this.next = NULL;413 this.next = 0p; 381 414 } 382 415 … … 387 420 388 421 // Check that everything is as expected 389 assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );422 assertf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors ); 390 423 verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count ); 391 424 verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count ); … … 399 432 // Append the current wait operation to the ones already queued on the condition 400 433 // We don't need locks for that since conditions must always be waited on inside monitor mutual exclusion 434 /* paranoid */ verify( waiter.next == 0p ); 401 435 append( this.blocked, &waiter ); 436 /* paranoid */ verify( waiter.next == 1p ); 402 437 403 438 // Lock all monitors (aggregates the locks as well) … … 406 441 // Find the next thread(s) to run 407 442 __lock_size_t thread_count = 0; 408 thread_desc* threads[ count ];443 $thread * threads[ count ]; 409 444 __builtin_memset( threads, 0, sizeof( threads ) ); 410 445 … … 414 449 // Remove any duplicate threads 415 450 for( __lock_size_t i = 0; i < count; i++) { 416 thread_desc* new_owner = next_thread( monitors[i] );451 $thread * new_owner = next_thread( monitors[i] ); 417 452 insert_unique( threads, thread_count, new_owner ); 418 453 } 419 454 455 // Unlock the locks, we don't need them anymore 456 for(int i = 0; i < count; i++) { 457 unlock( *locks[i] ); 458 } 459 460 // Wake the threads 461 for(int i = 0; i < thread_count; i++) { 462 unpark( threads[i] ); 463 } 464 420 465 // Everything is ready to go to sleep 421 BlockInternal( locks, count, threads, thread_count);466 park(); 422 467 423 468 // We are back, restore the owners and recursions … … 434 479 //Some more checking in debug 435 480 __cfaabi_dbg_debug_do( 436 thread_desc* this_thrd = TL_GET( this_thread );481 $thread * this_thrd = TL_GET( this_thread ); 437 482 if ( this.monitor_count != this_thrd->monitors.size ) { 438 483 abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size ); … … 449 494 450 495 // Lock all monitors 451 lock_all( this.monitors, NULL, count );496 lock_all( this.monitors, 0p, count ); 452 497 453 498 //Pop the head of the waiting queue … … 471 516 472 517 //Check that everything is as expected 473 verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );518 verifyf( 
this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors ); 474 519 verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count ); 475 520 … … 488 533 489 534 //Find the thread to run 490 thread_desc * signallee = pop_head( this.blocked )->waiting_thread; 491 set_owner( monitors, count, signallee ); 535 $thread * signallee = pop_head( this.blocked )->waiting_thread; 536 /* paranoid */ verify( signallee->next == 0p ); 537 __set_owner( monitors, count, signallee ); 492 538 493 539 __cfaabi_dbg_print_buffer_decl( "Kernel : signal_block condition %p (s: %p)\n", &this, signallee ); 494 540 541 // unlock all the monitors 542 unlock_all( locks, count ); 543 544 // unpark the thread we signalled 545 unpark( signallee ); 546 495 547 //Everything is ready to go to sleep 496 BlockInternal( locks, count, &signallee, 1);548 park(); 497 549 498 550 … … 535 587 // Create one! 536 588 __lock_size_t max = count_max( mask ); 537 monitor_desc* mon_storage[max];589 $monitor * mon_storage[max]; 538 590 __builtin_memset( mon_storage, 0, sizeof( mon_storage ) ); 539 591 __lock_size_t actual_count = aggregate( mon_storage, mask ); … … 553 605 { 554 606 // Check if the entry queue 555 thread_desc* next; int index;607 $thread * next; int index; 556 608 [next, index] = search_entry_queue( mask, monitors, count ); 557 609 … … 563 615 verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." ); 564 616 565 monitor_desc* mon2dtor = accepted[0];617 $monitor * mon2dtor = accepted[0]; 566 618 verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." ); 567 619 … … 589 641 590 642 // Set the owners to be the next thread 591 set_owner( monitors, count, next ); 592 593 // Everything is ready to go to sleep 594 BlockInternal( locks, count, &next, 1 ); 643 __set_owner( monitors, count, next ); 644 645 // unlock all the monitors 646 unlock_all( locks, count ); 647 648 // unpark the thread we signalled 649 unpark( next ); 650 651 //Everything is ready to go to sleep 652 park(); 595 653 596 654 // We are back, restore the owners and recursions … … 630 688 } 631 689 690 // unlock all the monitors 691 unlock_all( locks, count ); 692 632 693 //Everything is ready to go to sleep 633 BlockInternal( locks, count);694 park(); 634 695 635 696 … … 648 709 // Utilities 649 710 650 static inline void set_owner( monitor_desc * this, thread_desc* owner ) {651 / / __cfaabi_dbg_print_safe( "Kernal : Setting owner of %p to %p ( was %p)\n", this, owner, this->owner);711 static inline void __set_owner( $monitor * this, $thread * owner ) { 712 /* paranoid */ verify( this->lock.lock ); 652 713 653 714 //Pass the monitor appropriately … … 658 719 } 659 720 660 static inline void set_owner( monitor_desc * monitors [], __lock_size_t count, thread_desc * owner ) { 661 monitors[0]->owner = owner; 662 monitors[0]->recursion = 1; 721 static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) { 722 /* paranoid */ verify ( monitors[0]->lock.lock ); 723 /* paranoid */ verifyf( monitors[0]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[0]->owner, monitors[0]->recursion, monitors[0] ); 724 monitors[0]->owner = owner; 725 monitors[0]->recursion = 1; 663 726 for( __lock_size_t i = 1; i < count; i++ ) { 664 monitors[i]->owner = owner; 665 monitors[i]->recursion = 0; 666 } 667 } 668 669 static inline void set_mask( monitor_desc * storage [], __lock_size_t count, const 
__waitfor_mask_t & mask ) { 727 /* paranoid */ verify ( monitors[i]->lock.lock ); 728 /* paranoid */ verifyf( monitors[i]->owner == kernelTLS.this_thread, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, monitors[i]->owner, monitors[i]->recursion, monitors[i] ); 729 monitors[i]->owner = owner; 730 monitors[i]->recursion = 0; 731 } 732 } 733 734 static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) { 670 735 for( __lock_size_t i = 0; i < count; i++) { 671 736 storage[i]->mask = mask; … … 673 738 } 674 739 675 static inline void reset_mask( monitor_desc* this ) {676 this->mask.accepted = NULL;677 this->mask.data = NULL;740 static inline void reset_mask( $monitor * this ) { 741 this->mask.accepted = 0p; 742 this->mask.data = 0p; 678 743 this->mask.size = 0; 679 744 } 680 745 681 static inline thread_desc * next_thread( monitor_desc* this ) {746 static inline $thread * next_thread( $monitor * this ) { 682 747 //Check the signaller stack 683 748 __cfaabi_dbg_print_safe( "Kernel : mon %p AS-stack top %p\n", this, this->signal_stack.top); … … 687 752 //regardless of if we are ready to baton pass, 688 753 //we need to set the monitor as in use 689 set_owner( this, urgent->owner->waiting_thread ); 754 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 755 __set_owner( this, urgent->owner->waiting_thread ); 690 756 691 757 return check_condition( urgent ); … … 694 760 // No signaller thread 695 761 // Get the next thread in the entry_queue 696 thread_desc * new_owner = pop_head( this->entry_queue ); 697 set_owner( this, new_owner ); 762 $thread * new_owner = pop_head( this->entry_queue ); 763 /* paranoid */ verifyf( !this->owner || kernelTLS.this_thread == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", kernelTLS.this_thread, this->owner, this->recursion, this ); 764 /* paranoid */ verify( !new_owner || new_owner->next == 0p ); 765 __set_owner( this, new_owner ); 698 766 699 767 return new_owner; 700 768 } 701 769 702 static inline bool is_accepted( monitor_desc* this, const __monitor_group_t & group ) {770 static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) { 703 771 __acceptable_t * it = this->mask.data; // Optim 704 772 __lock_size_t count = this->mask.size; … … 722 790 } 723 791 724 static inline void init( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {792 static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 725 793 for( __lock_size_t i = 0; i < count; i++) { 726 794 (criteria[i]){ monitors[i], waiter }; … … 730 798 } 731 799 732 static inline void init_push( __lock_size_t count, monitor_desc* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {800 static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 733 801 for( __lock_size_t i = 0; i < count; i++) { 734 802 (criteria[i]){ monitors[i], waiter }; … … 746 814 } 747 815 748 static inline void lock_all( monitor_desc* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {816 static inline void lock_all( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) { 749 817 for( __lock_size_t i = 
0; i < count; i++ ) { 750 818 __spinlock_t * l = &source[i]->lock; … … 760 828 } 761 829 762 static inline void unlock_all( monitor_desc* locks [], __lock_size_t count ) {830 static inline void unlock_all( $monitor * locks [], __lock_size_t count ) { 763 831 for( __lock_size_t i = 0; i < count; i++ ) { 764 832 unlock( locks[i]->lock ); … … 767 835 768 836 static inline void save( 769 monitor_desc* ctx [],837 $monitor * ctx [], 770 838 __lock_size_t count, 771 839 __attribute((unused)) __spinlock_t * locks [], … … 780 848 781 849 static inline void restore( 782 monitor_desc* ctx [],850 $monitor * ctx [], 783 851 __lock_size_t count, 784 852 __spinlock_t * locks [], … … 798 866 // 2 - Checks if all the monitors are ready to run 799 867 // if so return the thread to run 800 static inline thread_desc* check_condition( __condition_criterion_t * target ) {868 static inline $thread * check_condition( __condition_criterion_t * target ) { 801 869 __condition_node_t * node = target->owner; 802 870 unsigned short count = node->count; … … 816 884 } 817 885 818 __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL);819 return ready2run ? node->waiting_thread : NULL;886 __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p ); 887 return ready2run ? node->waiting_thread : 0p; 820 888 } 821 889 822 890 static inline void brand_condition( condition & this ) { 823 thread_desc* thrd = TL_GET( this_thread );891 $thread * thrd = TL_GET( this_thread ); 824 892 if( !this.monitors ) { 825 893 // __cfaabi_dbg_print_safe( "Branding\n" ); 826 assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );894 assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data ); 827 895 this.monitor_count = thrd->monitors.size; 828 896 829 this.monitors = ( monitor_desc**)malloc( this.monitor_count * sizeof( *this.monitors ) );897 this.monitors = ($monitor **)malloc( this.monitor_count * sizeof( *this.monitors ) ); 830 898 for( int i = 0; i < this.monitor_count; i++ ) { 831 899 this.monitors[i] = thrd->monitors[i]; … … 834 902 } 835 903 836 static inline [ thread_desc *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor_desc* monitors [], __lock_size_t count ) {837 838 __queue_t( thread_desc) & entry_queue = monitors[0]->entry_queue;904 static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor * monitors [], __lock_size_t count ) { 905 906 __queue_t($thread) & entry_queue = monitors[0]->entry_queue; 839 907 840 908 // For each thread in the entry-queue 841 for( thread_desc** thrd_it = &entry_queue.head;842 *thrd_it ;909 for( $thread ** thrd_it = &entry_queue.head; 910 *thrd_it != 1p; 843 911 thrd_it = &(*thrd_it)->next 844 912 ) { … … 883 951 } 884 952 885 static inline __lock_size_t aggregate( monitor_desc* storage [], const __waitfor_mask_t & mask ) {953 static inline __lock_size_t aggregate( $monitor * storage [], const __waitfor_mask_t & mask ) { 886 954 __lock_size_t size = 0; 887 955 for( __lock_size_t i = 0; i < mask.size; i++ ) { -
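Note on the new paranoid checks above (verify( thrd->next == 0p ) before an append, verify( thrd->next == 1p ) after, and the *thrd_it != 1p loop test in search_entry_queue): they rely on a sentinel convention for the intrusive thread queue, where 0p means "not on any queue" and 1p marks the last queued element. A minimal plain-C illustration of that convention, using hypothetical node/queue types rather than the real __queue_t:

	#include <assert.h>

	#define NOT_QUEUED ((struct node *)0)   /* 0p : thread is not on any queue   */
	#define QUEUE_END  ((struct node *)1)   /* 1p : queued, and last in the list */

	struct node  { struct node * next; };
	struct queue { struct node * head; struct node ** tail; };

	static void queue_init( struct queue * q ) { q->head = QUEUE_END; q->tail = &q->head; }
	static int  queue_empty( const struct queue * q ) { return q->head == QUEUE_END; }

	static void queue_append( struct queue * q, struct node * n ) {
		assert( n->next == NOT_QUEUED );    /* mirrors verify( thrd->next == 0p ) above  */
		n->next = QUEUE_END;                /* mirrors verify( thrd->next == 1p ) after  */
		*q->tail = n;
		q->tail  = &n->next;
	}

Distinguishing "unqueued" from "queued but last" with the 0p/1p pair lets the asserts catch double-enqueue bugs that a plain NULL-terminated list would silently accept.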
libcfa/src/concurrency/monitor.hfa
r9fb8f01 r3d5701e 10 10 // Created On : Thd Feb 23 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Oct 7 18:06:45 201713 // Update Count : 1 012 // Last Modified On : Wed Dec 4 07:55:32 2019 13 // Update Count : 11 14 14 // 15 15 … … 23 23 24 24 trait is_monitor(dtype T) { 25 monitor_desc* get_monitor( T & );25 $monitor * get_monitor( T & ); 26 26 void ^?{}( T & mutex ); 27 27 }; 28 28 29 static inline void ?{}( monitor_desc& this) with( this ) {29 static inline void ?{}($monitor & this) with( this ) { 30 30 lock{}; 31 31 entry_queue{}; 32 32 signal_stack{}; 33 owner = NULL;33 owner = 0p; 34 34 recursion = 0; 35 mask.accepted = NULL;36 mask.data = NULL;35 mask.accepted = 0p; 36 mask.data = 0p; 37 37 mask.size = 0; 38 dtor_node = NULL;38 dtor_node = 0p; 39 39 } 40 40 41 static inline void ^?{}($monitor & ) {} 42 41 43 struct monitor_guard_t { 42 monitor_desc** m;44 $monitor ** m; 43 45 __lock_size_t count; 44 46 __monitor_group_t prev; 45 47 }; 46 48 47 void ?{}( monitor_guard_t & this, monitor_desc** m, __lock_size_t count, void (*func)() );49 void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() ); 48 50 void ^?{}( monitor_guard_t & this ); 49 51 50 52 struct monitor_dtor_guard_t { 51 monitor_desc* m;53 $monitor * m; 52 54 __monitor_group_t prev; 53 55 }; 54 56 55 void ?{}( monitor_dtor_guard_t & this, monitor_desc** m, void (*func)() );57 void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)() ); 56 58 void ^?{}( monitor_dtor_guard_t & this ); 57 59 … … 70 72 71 73 // The monitor this criterion concerns 72 monitor_desc* target;74 $monitor * target; 73 75 74 76 // The parent node to which this criterion belongs … … 85 87 struct __condition_node_t { 86 88 // Thread that needs to be woken when all criteria are met 87 thread_desc* waiting_thread;89 $thread * waiting_thread; 88 90 89 91 // Array of criteria (Criterions are contiguous in memory) … … 104 106 } 105 107 106 void ?{}(__condition_node_t & this, thread_desc* waiting_thread, __lock_size_t count, uintptr_t user_info );108 void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ); 107 109 void ?{}(__condition_criterion_t & this ); 108 void ?{}(__condition_criterion_t & this, monitor_desc* target, __condition_node_t * owner );110 void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner ); 109 111 110 112 struct condition { … … 113 115 114 116 // Array of monitor pointers (Monitors are NOT contiguous in memory) 115 monitor_desc** monitors;117 $monitor ** monitors; 116 118 117 119 // Number of monitors in the array … … 120 122 121 123 static inline void ?{}( condition & this ) { 122 this.monitors = NULL;124 this.monitors = 0p; 123 125 this.monitor_count = 0; 124 126 } … … 131 133 bool signal ( condition & this ); 132 134 bool signal_block( condition & this ); 133 static inline bool is_empty ( condition & this ) { return !this.blocked.head; }135 static inline bool is_empty ( condition & this ) { return this.blocked.head == 1p; } 134 136 uintptr_t front ( condition & this ); 135 137 -
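For orientation, the condition/wait/signal interface declared above is what the reworked internal scheduling in monitor.cfa implements. A minimal bounded-counter usage sketch, assuming the usual CFA monitor and mutex-parameter syntax; it is not part of this changeset:

	monitor counter {
		condition not_zero;
		int count;
	};

	void increment( counter & mutex this ) {
		this.count += 1;
		signal( this.not_zero );                            // wake one waiter, if any
	}

	void decrement( counter & mutex this ) {
		while ( this.count == 0 ) wait( this.not_zero );    // releases the monitor while parked
		this.count -= 1;
	}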
libcfa/src/concurrency/mutex.cfa
r9fb8f01 r3d5701e 11 11 // Author : Thierry Delisle 12 12 // Created On : Fri May 25 01:37:11 2018 13 // Last Modified By : Thierry Delisle14 // Last Modified On : Fri May 25 01:37:51 201815 // Update Count : 013 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Wed Dec 4 09:16:39 2019 15 // Update Count : 1 16 16 // 17 17 … … 40 40 if( is_locked ) { 41 41 append( blocked_threads, kernelTLS.this_thread ); 42 BlockInternal( &lock ); 42 unlock( lock ); 43 park(); 43 44 } 44 45 else { … … 62 63 lock( this.lock __cfaabi_dbg_ctx2 ); 63 64 this.is_locked = (this.blocked_threads != 0); 64 WakeThread(65 unpark( 65 66 pop_head( this.blocked_threads ) 66 67 ); … … 73 74 this.lock{}; 74 75 this.blocked_threads{}; 75 this.owner = NULL;76 this.owner = 0p; 76 77 this.recursion_count = 0; 77 78 } … … 83 84 void lock(recursive_mutex_lock & this) with(this) { 84 85 lock( lock __cfaabi_dbg_ctx2 ); 85 if( owner == NULL) {86 if( owner == 0p ) { 86 87 owner = kernelTLS.this_thread; 87 88 recursion_count = 1; … … 94 95 else { 95 96 append( blocked_threads, kernelTLS.this_thread ); 96 BlockInternal( &lock ); 97 unlock( lock ); 98 park(); 97 99 } 98 100 } … … 101 103 bool ret = false; 102 104 lock( lock __cfaabi_dbg_ctx2 ); 103 if( owner == NULL) {105 if( owner == 0p ) { 104 106 owner = kernelTLS.this_thread; 105 107 recursion_count = 1; … … 118 120 recursion_count--; 119 121 if( recursion_count == 0 ) { 120 thread_desc* thrd = pop_head( blocked_threads );122 $thread * thrd = pop_head( blocked_threads ); 121 123 owner = thrd; 122 124 recursion_count = (thrd ? 1 : 0); 123 WakeThread( thrd );125 unpark( thrd ); 124 126 } 125 127 unlock( lock ); … … 138 140 void notify_one(condition_variable & this) with(this) { 139 141 lock( lock __cfaabi_dbg_ctx2 ); 140 WakeThread(142 unpark( 141 143 pop_head( this.blocked_threads ) 142 144 ); … … 147 149 lock( lock __cfaabi_dbg_ctx2 ); 148 150 while(this.blocked_threads) { 149 WakeThread(151 unpark( 150 152 pop_head( this.blocked_threads ) 151 153 ); … … 157 159 lock( this.lock __cfaabi_dbg_ctx2 ); 158 160 append( this.blocked_threads, kernelTLS.this_thread ); 159 BlockInternal( &this.lock ); 161 unlock( this.lock ); 162 park(); 160 163 } 161 164 … … 164 167 lock( this.lock __cfaabi_dbg_ctx2 ); 165 168 append( this.blocked_threads, kernelTLS.this_thread ); 166 void __unlock(void) { 167 unlock(l); 168 unlock(this.lock); 169 } 170 BlockInternal( __unlock ); 169 unlock(l); 170 unlock(this.lock); 171 park(); 171 172 lock(l); 172 173 } -
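As the lock/unlock bodies above show, recursive_mutex_lock tracks an owner and a recursion_count and only hands the lock to the next parked waiter when the count returns to zero. A small usage sketch of that behaviour (illustrative only):

	recursive_mutex_lock rlock;

	void nested_work() {
		lock( rlock );     // first acquisition: owner = this thread, recursion_count = 1
		lock( rlock );     // re-acquisition by the owner: recursion_count = 2, no blocking
		// ... critical section, possibly via a helper that also locks rlock ...
		unlock( rlock );   // recursion_count back to 1, still owned
		unlock( rlock );   // count reaches 0: ownership passes to the next parked waiter, if any
	}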
libcfa/src/concurrency/mutex.hfa
r9fb8f01 r3d5701e 11 11 // Author : Thierry Delisle 12 12 // Created On : Fri May 25 01:24:09 2018 13 // Last Modified By : Thierry Delisle14 // Last Modified On : Fri May 25 01:24:12 201815 // Update Count : 013 // Last Modified By : Peter A. Buhr 14 // Last Modified On : Wed Dec 4 09:16:53 2019 15 // Update Count : 1 16 16 // 17 17 … … 36 36 37 37 // List of blocked threads 38 __queue_t(struct thread_desc) blocked_threads;38 __queue_t(struct $thread) blocked_threads; 39 39 40 40 // Locked flag … … 55 55 56 56 // List of blocked threads 57 __queue_t(struct thread_desc) blocked_threads;57 __queue_t(struct $thread) blocked_threads; 58 58 59 59 // Current thread owning the lock 60 struct thread_desc* owner;60 struct $thread * owner; 61 61 62 62 // Number of recursion level … … 83 83 84 84 // List of blocked threads 85 __queue_t(struct thread_desc) blocked_threads;85 __queue_t(struct $thread) blocked_threads; 86 86 }; 87 87 … … 110 110 111 111 static inline void ?{}(lock_scope(L) & this) { 112 this.locks = NULL;112 this.locks = 0p; 113 113 this.count = 0; 114 114 } -
libcfa/src/concurrency/preemption.cfa
r9fb8f01 r3d5701e 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : T ue Jun 5 17:35:49 201813 // Update Count : 3712 // Last Modified On : Thu Dec 5 16:34:05 2019 13 // Update Count : 43 14 14 // 15 15 … … 24 24 #include <string.h> 25 25 #include <unistd.h> 26 #include <limits.h> // PTHREAD_STACK_MIN 26 27 } 27 28 … … 38 39 // FwdDeclarations : timeout handlers 39 40 static void preempt( processor * this ); 40 static void timeout( thread_desc* this );41 static void timeout( $thread * this ); 41 42 42 43 // FwdDeclarations : Signal handlers … … 64 65 event_kernel_t * event_kernel; // kernel public handle to even kernel 65 66 static pthread_t alarm_thread; // pthread handle to alarm thread 67 static void * alarm_stack; // pthread stack for alarm thread 66 68 67 69 static void ?{}(event_kernel_t & this) with( this ) { … … 81 83 // Get next expired node 82 84 static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) { 83 if( !alarms->head ) return NULL;// If no alarms return null84 if( alarms->head->alarm >= currtime ) return NULL;// If alarms head not expired return null85 return pop(alarms); // Otherwise just pop head85 if( !alarms->head ) return 0p; // If no alarms return null 86 if( alarms->head->alarm >= currtime ) return 0p; // If alarms head not expired return null 87 return pop(alarms); // Otherwise just pop head 86 88 } 87 89 88 90 // Tick one frame of the Discrete Event Simulation for alarms 89 91 static void tick_preemption() { 90 alarm_node_t * node = NULL;// Used in the while loop but cannot be declared in the while condition91 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading92 Time currtime = __kernel_get_time(); // Check current time once so weeverything "happens at once"92 alarm_node_t * node = 0p; // Used in the while loop but cannot be declared in the while condition 93 alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading 94 Time currtime = __kernel_get_time(); // Check current time once so everything "happens at once" 93 95 94 96 //Loop throught every thing expired … … 182 184 183 185 // Enable interrupts by decrementing the counter 184 // If counter reaches 0, execute any pending CtxSwitch186 // If counter reaches 0, execute any pending __cfactx_switch 185 187 void enable_interrupts( __cfaabi_dbg_ctx_param ) { 186 188 processor * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store 187 thread_desc * thrd = kernelTLS.this_thread; // Cache the thread now since interrupts can start happening after the atomic store188 189 189 190 with( kernelTLS.preemption_state ){ … … 207 208 if( proc->pending_preemption ) { 208 209 proc->pending_preemption = false; 209 BlockInternal( thrd);210 force_yield( __POLL_PREEMPTION ); 210 211 } 211 212 } … … 217 218 218 219 // Disable interrupts by incrementint the counter 219 // Don't execute any pending CtxSwitch even if counter reaches 0220 // Don't execute any pending __cfactx_switch even if counter reaches 0 220 221 void enable_interrupts_noPoll() { 221 222 unsigned short prev = kernelTLS.preemption_state.disable_count; … … 243 244 sigaddset( &mask, sig ); 244 245 245 if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL) == -1 ) {246 if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) { 246 247 abort( "internal error, pthread_sigmask" ); 247 248 } … … 254 255 sigaddset( &mask, sig ); 255 256 256 if ( pthread_sigmask( SIG_BLOCK, &mask, 
NULL) == -1 ) {257 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 257 258 abort( "internal error, pthread_sigmask" ); 258 259 } … … 266 267 267 268 // reserved for future use 268 static void timeout( thread_desc* this ) {269 static void timeout( $thread * this ) { 269 270 //TODO : implement waking threads 270 271 } 271 272 272 273 // KERNEL ONLY 273 // Check if a CtxSwitch signal handler shoud defer274 // Check if a __cfactx_switch signal handler shoud defer 274 275 // If true : preemption is safe 275 276 // If false : preemption is unsafe and marked as pending … … 301 302 302 303 // Setup proper signal handlers 303 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler304 __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // __cfactx_switch handler 304 305 305 306 signal_block( SIGALRM ); 306 307 307 pthread_create( &alarm_thread, NULL, alarm_loop, NULL);308 alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p ); 308 309 } 309 310 … … 316 317 sigset_t mask; 317 318 sigfillset( &mask ); 318 sigprocmask( SIG_BLOCK, &mask, NULL);319 sigprocmask( SIG_BLOCK, &mask, 0p ); 319 320 320 321 // Notify the alarm thread of the shutdown … … 323 324 324 325 // Wait for the preemption thread to finish 325 pthread_join( alarm_thread, NULL ); 326 327 pthread_join( alarm_thread, 0p ); 328 free( alarm_stack ); 326 329 327 330 // Preemption is now fully stopped … … 380 383 static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" ); 381 384 #endif 382 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL) == -1 ) {385 if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) { 383 386 abort( "internal error, sigprocmask" ); 384 387 } … … 390 393 // Preemption can occur here 391 394 392 BlockInternal( kernelTLS.this_thread ); // Do the actual CtxSwitch395 force_yield( __ALARM_PREEMPTION ); // Do the actual __cfactx_switch 393 396 } 394 397 … … 399 402 sigset_t mask; 400 403 sigfillset(&mask); 401 if ( pthread_sigmask( SIG_BLOCK, &mask, NULL) == -1 ) {404 if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) { 402 405 abort( "internal error, pthread_sigmask" ); 403 406 } … … 420 423 {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );} 421 424 continue; 422 case EINVAL :425 case EINVAL : 423 426 abort( "Timeout was invalid." ); 424 427 default: … … 453 456 EXIT: 454 457 __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" ); 455 return NULL;458 return 0p; 456 459 } 457 460 … … 466 469 sigset_t oldset; 467 470 int ret; 468 ret = pthread_sigmask(0, NULL, &oldset);471 ret = pthread_sigmask(0, 0p, &oldset); 469 472 if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); } 470 473 -
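The preemption rework keeps the same overall signal routing: SIGALRM is blocked in the processor threads and a dedicated alarm thread (now running on a stack allocated through __create_pthread) consumes the timer signals and ticks the alarm list. A plain-POSIX sketch of that routing pattern, using sigwait instead of the sigwaitinfo/handler machinery in the real code:

	#include <pthread.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	// Dedicated thread: receive SIGALRM synchronously instead of through a handler.
	static void * alarm_thread_main( void * arg ) {
		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );
		int sig;
		sigwait( &mask, &sig );                       // blocks until SIGALRM is pending
		printf( "alarm thread received signal %d\n", sig );
		return NULL;
	}

	int main( void ) {
		sigset_t mask;
		sigemptyset( &mask );
		sigaddset( &mask, SIGALRM );
		pthread_sigmask( SIG_BLOCK, &mask, NULL );    // block here; created threads inherit the mask

		pthread_t alarm_thread;
		pthread_create( &alarm_thread, NULL, alarm_thread_main, NULL );
		alarm( 1 );                                   // fire SIGALRM in about one second
		pthread_join( alarm_thread, NULL );
		return 0;
	}

Because every thread blocks SIGALRM, the signal stays pending until the alarm thread collects it, which mirrors how the alarm loop above waits for its signals synchronously rather than in an asynchronous handler.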
libcfa/src/concurrency/thread.cfa
r9fb8f01 r3d5701e
10   10   // Created On       : Tue Jan 17 12:27:26 2017
11   11   // Last Modified By : Peter A. Buhr
12        // Last Modified On : Fri Mar 30 17:19:52 2018
13        // Update Count     : 8
     12   // Last Modified On : Wed Dec 4 09:17:49 2019
     13   // Update Count     : 9
14   14   //
15   15
… …
23   23   #include "invoke.h"
24   24
25        extern "C" {
26        #include <fenv.h>
27        #include <stddef.h>
28        }
29
30        //extern volatile thread_local processor * this_processor;
31
32   25   //-----------------------------------------------------------------------------
33   26   // Thread ctors and dtors
34        void ?{}( thread_desc& this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
35           context{ NULL, NULL };
     27   void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
     28      context{ 0p, 0p };
36   29      self_cor{ name, storage, storageSize };
37   30      state = Start;
     31      preempted = __NO_PREEMPTION;
38   32      curr_cor = &self_cor;
39   33      self_mon.owner = &this;
… …
41   35      self_mon_p = &self_mon;
42   36      curr_cluster = &cl;
43           next = NULL;
     37      next = 0p;
44   38
45           node.next = NULL;
46           node.prev = NULL;
     39      node.next = 0p;
     40      node.prev = 0p;
47   41      doregister(curr_cluster, this);
48   42
… …
50   44   }
51   45
52        void ^?{}( thread_desc& this) with( this ) {
     46   void ^?{}($thread& this) with( this ) {
53   47      unregister(curr_cluster, this);
54   48      ^self_cor{};
55   49   }
56   50
     51   //-----------------------------------------------------------------------------
     52   // Starting and stopping threads
     53   forall( dtype T | is_thread(T) )
     54   void __thrd_start( T & this, void (*main_p)(T &) ) {
     55      $thread * this_thrd = get_thread(this);
     56
     57      disable_interrupts();
     58      __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread);
     59
     60      this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
     61      verify( this_thrd->context.SP );
     62
     63      __schedule_thread(this_thrd);
     64      enable_interrupts( __cfaabi_dbg_ctx );
     65   }
     66
     67   //-----------------------------------------------------------------------------
     68   // Support for threads that don't ues the thread keyword
57   69   forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
58   70   void ?{}( scoped(T)& this ) with( this ) {
59   71      handle{};
60           __thrd_start(handle );
     72      __thrd_start(handle, main);
61   73   }
62   74
… …
64   76   void ?{}( scoped(T)& this, P params ) with( this ) {
65   77      handle{ params };
66           __thrd_start(handle );
     78      __thrd_start(handle, main);
67   79   }
68   80
… …
72   84   }
73   85
74        //-----------------------------------------------------------------------------
75        // Starting and stopping threads
76        forall( dtype T | is_thread(T) )
77        void __thrd_start( T& this ) {
78           thread_desc * this_thrd = get_thread(this);
79           thread_desc * curr_thrd = TL_GET( this_thread );
80
81           disable_interrupts();
82           CtxStart(&this, CtxInvokeThread);
83           this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
84           verify( this_thrd->context.SP );
85           CtxSwitch( &curr_thrd->context, &this_thrd->context );
86
87           ScheduleThread(this_thrd);
88           enable_interrupts( __cfaabi_dbg_ctx );
89        }
90
91        extern "C" {
92           // KERNEL ONLY
93           void __finish_creation(thread_desc * this) {
94              // set new coroutine that the processor is executing
95              // and context switch to it
96              verify( kernelTLS.this_thread != this );
97              verify( kernelTLS.this_thread->context.SP );
98              CtxSwitch( &this->context, &kernelTLS.this_thread->context );
99           }
100       }
101
102       void yield( void ) {
103          // Safety note : This could cause some false positives due to preemption
104          verify( TL_GET( preemption_state.enabled ) );
105          BlockInternal( TL_GET( this_thread ) );
106          // Safety note : This could cause some false positives due to preemption
107          verify( TL_GET( preemption_state.enabled ) );
108       }
109
110       void yield( unsigned times ) {
111          for( unsigned i = 0; i < times; i++ ) {
112             yield();
113          }
114       }
115
116  86   // Local Variables: //
117  87   // mode: c //
-
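The thread.cfa changes replace the synchronous CtxStart/CtxSwitch start-up with __cfactx_start plus __schedule_thread, so the creator enqueues the new thread instead of context switching into it and back, and __thrd_start now receives the thread's main directly. As a rough illustration only (the Worker type, its id field, and the printing are invented for this sketch and are not part of the changeset), a thread declared with the thread keyword exercises this path through its generated constructor code:

#include <fstream.hfa>
#include <thread.hfa>

thread Worker {                        // hypothetical example type, not in the changeset
   int id;
};
void ?{}( Worker & this, int id ) { this.id = id; }

void main( Worker & this ) {           // passed to __thrd_start as the main_p argument
   sout | "worker" | this.id | "running";
}

int main() {
   Worker w = { 7 };                   // the generated ctor code ends by calling __thrd_start( w, main ),
                                       // which queues w via __schedule_thread rather than
                                       // switching into it immediately
}                                      // w is joined by its destructor at the end of the block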
libcfa/src/concurrency/thread.hfa
r9fb8f01 r3d5701e
10   10   // Created On       : Tue Jan 17 12:27:26 2017
11   11   // Last Modified By : Peter A. Buhr
12        // Last Modified On : Fri Jun 21 17:51:33 2019
13        // Update Count     : 5
     12   // Last Modified On : Wed Dec 4 09:18:14 2019
     13   // Update Count     : 6
14   14   //
15   15
… …
28   28      void ^?{}(T& mutex this);
29   29      void main(T& this);
30           thread_desc* get_thread(T& this);
     30      $thread* get_thread(T& this);
31   31   };
32   32
33        #define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
     33   // define that satisfies the trait without using the thread keyword
     34   #define DECL_THREAD(X) $thread* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)
     35
     36   // Inline getters for threads/coroutines/monitors
     37   forall( dtype T | is_thread(T) )
     38   static inline $coroutine* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
34   39
35   40   forall( dtype T | is_thread(T) )
36        static inline coroutine_desc* get_coroutine(T & this) {
37           return &get_thread(this)->self_cor;
38        }
     41   static inline $monitor * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
39   42
40        forall( dtype T | is_thread(T) )
41        static inline monitor_desc* get_monitor(T & this) {
42           return &get_thread(this)->self_mon;
43        }
     43   static inline $coroutine* get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; }
     44   static inline $monitor * get_monitor ($thread * this) __attribute__((const)) { return &this->self_mon; }
44   45
45        static inline coroutine_desc* get_coroutine(thread_desc * this) {
46           return &this->self_cor;
47        }
48
49        static inline monitor_desc* get_monitor(thread_desc * this) {
50           return &this->self_mon;
51        }
52
     46   //-----------------------------------------------------------------------------
     47   // forward declarations needed for threads
53   48   extern struct cluster * mainCluster;
54   49
55   50   forall( dtype T | is_thread(T) )
56        void __thrd_start( T & this );
     51   void __thrd_start( T & this, void (*)(T &) );
57   52
58   53   //-----------------------------------------------------------------------------
59   54   // Ctors and dtors
60        void ?{}( thread_desc& this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
61        void ^?{}( thread_desc& this);
     55   void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
     56   void ^?{}($thread & this);
62   57
63        static inline void ?{}( thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
64        static inline void ?{}( thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
65        static inline void ?{}( thread_desc& this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
66        static inline void ?{}( thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 65000 }; }
67        static inline void ?{}( thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, NULL, stackSize }; }
68        static inline void ?{}( thread_desc& this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
69        static inline void ?{}( thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 65000 }; }
70        static inline void ?{}( thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 65000 }; }
71        static inline void ?{}( thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
     58   static inline void ?{}($thread & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
     59   static inline void ?{}($thread & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
     60   static inline void ?{}($thread & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
     61   static inline void ?{}($thread & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }
     62   static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }
     63   static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
     64   static inline void ?{}($thread & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }
     65   static inline void ?{}($thread & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }
     66   static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
72   67
73   68   //-----------------------------------------------------------------------------
… …
88   83   void ^?{}( scoped(T)& this );
89   84
90        void yield();
91        void yield( unsigned times );
     85   //-----------------------------------------------------------------------------
     86   // Thread getters
     87   static inline struct $thread * active_thread () { return TL_GET( this_thread ); }
92   88
93        static inline struct thread_desc * active_thread () { return TL_GET( this_thread ); }
     89   //-----------------------------------------------------------------------------
     90   // Scheduler API
     91
     92   //----------
     93   // Park thread: block until corresponding call to unpark, won't block if unpark is already called
     94   void park( void );
     95
     96   //----------
     97   // Unpark a thread, if the thread is already blocked, schedule it
     98   // if the thread is not yet block, signal that it should rerun immediately
     99   void unpark( $thread * this );
     100
     101  forall( dtype T | is_thread(T) )
     102  static inline void unpark( T & this ) { if(!&this) return; unpark( get_thread( this ) );}
     103
     104  //----------
     105  // Yield: force thread to block and be rescheduled
     106  bool force_yield( enum __Preemption_Reason );
     107
     108  static inline void yield() {
     109     force_yield(__MANUAL_PREEMPTION);
     110  }
     111
     112  // Yield: yield N times
     113  static inline void yield( unsigned times ) {
     114     for( times ) {
     115        yield();
     116     }
     117  }
94   118
95   119  // Local Variables: //
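The header now exposes park/unpark and force_yield as the public scheduling hooks, with yield reduced to force_yield(__MANUAL_PREEMPTION). Below is a minimal sketch of how the pair composes (the Waiter type and its dummy field are invented for illustration and are not part of the changeset); per the comments in the diff, park returns immediately if the matching unpark has already happened, so the ordering in this sketch is race-free:

#include <thread.hfa>

thread Waiter {                        // hypothetical example type, not in the changeset
   int dummy;
};

void main( Waiter & this ) {
   park();                             // block here until the matching unpark;
                                       // returns at once if unpark already happened
}

int main() {
   Waiter w;                           // w starts running and (eventually) parks itself
   unpark( w );                        // forall overload: forwards to unpark( get_thread(w) );
                                       // safe whether or not w has parked yet
}                                      // w's destructor joins the thread at block exit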