Changeset c7a900a
- Timestamp: Feb 21, 2020, 5:31:19 PM
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: a8078ee
- Parents: a505021
- Location: libcfa/src/concurrency
- Files: 13 edited
libcfa/src/concurrency/CtxSwitch-arm.S
Renames the exported symbols only: the .global/.type directives and labels CtxSwitch and CtxInvokeStub become __cfactx_switch and __cfactx_invoke_stub. The bodies of both routines (the callee-saved register save/restore in the switch, the ldmfd/mov tail of the invoke stub) are unchanged.
libcfa/src/concurrency/CtxSwitch-i386.S
Same rename for the 32-bit x86 version: CtxSwitch becomes __cfactx_switch in the .globl, .type, label, and .size directives; the register save/restore code itself is untouched.
libcfa/src/concurrency/CtxSwitch-x86_64.S
Same rename for the 64-bit version: CtxSwitch becomes __cfactx_switch and CtxInvokeStub becomes __cfactx_invoke_stub in the .globl, .type, label, and .size directives. The invoke stub still moves its arguments into %rdi/%rsi and jumps through %r13; only the symbol names change.
libcfa/src/concurrency/coroutine.cfa
Renames the C-linkage helpers defined here: __leave_coroutine becomes __cfactx_cor_leave and __finish_coroutine becomes __cfactx_cor_finish; their bodies are unchanged.
libcfa/src/concurrency/coroutine.hfa
Renames the extern "C" declarations and all call sites: CtxInvokeCoroutine becomes __cfactx_invoke_coroutine, CtxStart becomes __cfactx_start, _CtxCoroutine_Unwind becomes __cfactx_coroutine_unwind, and CtxSwitch becomes __cfactx_switch, with the asm("__cfactx_switch") label updated to match the assembly files. The context-switch helper now calls __cfactx_switch and __cfactx_coroutine_unwind, coroutine start-up calls __cfactx_start with __cfactx_invoke_coroutine, and the comment "when CtxSwitch returns we are back in the src coroutine" is updated to the new name.
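The asm("__cfactx_switch") clause in that declaration is what ties the C prototype to the renamed assembly entry points above. A minimal sketch of the mechanism, using hypothetical names (my_context_t, fast_switch, and fast_switch_impl are illustrations, not libcfa identifiers):

    // The asm("...") label pins the linker-visible symbol, so the routine can be
    // written in a .S file under exactly that name while C code calls it through
    // an ordinary prototype.
    struct my_context_t { void * SP; void * FP; };

    extern void fast_switch( struct my_context_t * from, struct my_context_t * to )
            asm( "fast_switch_impl" );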
libcfa/src/concurrency/invoke.c
Renames the kernel-entry externs to __cfactx_cor_finish, __cfactx_cor_leave, and __cfactx_thrd_leave; the extern list here also declares disable_interrupts() OPTIONAL_THREAD. The trampolines defined in this file are renamed to match: CtxInvokeCoroutine becomes __cfactx_invoke_coroutine, _CtxCoroutine_UnwindStop becomes __cfactx_coroutine_unwindstop, _CtxCoroutine_Unwind becomes __cfactx_coroutine_unwind, CtxInvokeThread becomes __cfactx_invoke_thread, and CtxStart becomes __cfactx_start. The fake-stack setup in __cfactx_start now seeds __cfactx_invoke_stub as the return address on both platforms.
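Both invoke trampolines follow the same shape: the first switch onto a new stack lands in a plain C routine that receives the user-level main and its object as untyped pointers, finishes setup, runs the body, and then hands control to the leave routine. A standalone sketch of that shape (invoke and hello are illustrative names, not the libcfa definitions):

    #include <stdio.h>

    // The user-level body is passed as an untyped function pointer plus its object.
    static void invoke( void (*main)(void *), void * arg ) {
            // ...finish per-object setup here (in libcfa: mark the coroutine/thread started)...
            main( arg );            // run the body
            // ...final clean-up; in libcfa this point never returns to its caller...
    }

    static void hello( void * arg ) { printf( "hello %s\n", (char *)arg ); }

    int main( void ) {
            char who[] = "world";
            invoke( hello, who );   // direct call; libcfa instead reaches invoke via a context switch onto a fresh stack
            return 0;
    }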
libcfa/src/concurrency/invoke.h
Updates the two comments that referred to CtxSwitch, re-enables the previously commented-out __attribute__((const)) on the __get accessor for thread_desc, and renames the assembler-routine declarations: CtxInvokeStub becomes __cfactx_invoke_stub and CtxSwitch becomes __cfactx_switch, with the matching asm label. The commented-out CtxStore/CtxRet declarations are left as-is.
libcfa/src/concurrency/kernel.cfa
Renames the static kernel-thread entry point CtxInvokeProcessor to __invoke_processor and updates its use in __create_pthread when a processor is started. All context-switch call sites (running a thread from the processor coroutine, returning to the processor, the first resume of the main thread, and the final switch back to the processor) now call __cfactx_switch, coroutine start-up calls __cfactx_start with __cfactx_invoke_coroutine, and the comments that mentioned CtxSwitch and CtxInvokeThread now use the new names.
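__invoke_processor keeps the standard pthread entry-point shape: a void *(*)(void *) routine that recovers the object it should drive from its argument. A self-contained sketch of that pattern (processor_like and invoke_processor are hypothetical names, not the libcfa code; compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct processor_like { const char * name; };

    static void * invoke_processor( void * arg ) {
            struct processor_like * proc = (struct processor_like *)arg;  // recover the object
            printf( "processor %s running\n", proc->name );
            // ...the real routine would set up thread-local state and run the scheduler loop here...
            return NULL;
    }

    int main( void ) {
            struct processor_like p = { "p0" };
            pthread_t kt;
            pthread_create( &kt, NULL, invoke_processor, &p );
            pthread_join( kt, NULL );
            return 0;
    }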
libcfa/src/concurrency/kernel.hfa
Re-enables the previously commented-out __attribute__((const)) on the __get accessors for processor and cluster.
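Here, as in invoke.h, the change only uncomments __attribute__((const)) on the __get accessors. A generic illustration of what the attribute promises (square and sum_of_squares are not libcfa code): the result depends only on the arguments and no global memory is read, so the compiler may merge repeated calls.

    static inline int square( int x ) __attribute__((const));
    static inline int square( int x ) { return x * x; }

    int sum_of_squares( int x ) {
            return square( x ) + square( x );   // the two calls may be folded into one
    }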
libcfa/src/concurrency/kernel_private.hfa
Renames the extern "C" declaration CtxInvokeThread to __cfactx_invoke_thread.
libcfa/src/concurrency/monitor.cfa
Renames the internal monitor routines and narrows the extern "C" block. The forward-declared set_owner helpers become __set_owner, and the single-monitor routines are renamed: __enter_monitor_desc becomes __enter, __enter_monitor_dtor becomes __dtor_enter, __leave_monitor_desc becomes __leave, and __leave_dtor_monitor_desc becomes __dtor_leave. These four are moved out of the extern "C" block (and re-indented accordingly); only __cfactx_thrd_leave, the renamed __leave_thread_monitor that is the last routine called by a thread, remains inside it. All call sites are updated: the group enter/leave loops call __enter and __leave, the mutex destructor guard calls __dtor_enter and __dtor_leave, and signal_block, the waitfor path, and the wake-up utilities call __set_owner. The bodies of the routines are otherwise unchanged.
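The narrowing of the extern "C" block matches which routines actually need a stable C-linkage name: __cfactx_thrd_leave is called by name from the C-side invoke machinery, while the per-monitor enter/leave helpers are purely internal. A hedged sketch of that split, with hypothetical names (runtime_entry_point, helper_enter):

    // Internal helper: static, free to be renamed or inlined, never referenced by
    // name from outside this translation unit.
    static void helper_enter( void ) { /* ... */ }

    // Routine that C (or assembly) code must be able to call by name: keep it
    // inside extern "C" so its symbol is not mangled.
    extern "C" {
            void runtime_entry_point( void ) {
                    helper_enter();
            }
    }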
libcfa/src/concurrency/preemption.cfa
Comment-only changes: the comments on enable_interrupts (decrement the counter; if it reaches 0, execute any pending context switch), enable_interrupts_noPoll (never poll, even at 0), the defer check in the signal handler, the SIGUSR1 handler setup, and force_yield now refer to __cfactx_switch instead of CtxSwitch.
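Those comments describe the counting discipline around preemption: disabling increments a per-thread counter, enabling decrements it, and a preemption that arrives while the count is non-zero is only acted on once it drops back to zero. A hedged, standalone sketch of that discipline (not the libcfa implementation; names are illustrative):

    #include <stdbool.h>

    static _Thread_local unsigned disable_count = 0;
    static _Thread_local bool preemption_pending = false;

    static void disable_preemption( void ) {
            disable_count += 1;                         // nesting is allowed
    }

    static void enable_preemption( void ) {
            disable_count -= 1;
            if ( disable_count == 0 && preemption_pending ) {
                    preemption_pending = false;
                    // ...perform the deferred context switch here...
            }
    }

    // Called when a preemption request (SIGUSR1 in libcfa) arrives.
    static void on_preemption_signal( void ) {
            if ( disable_count != 0 ) preemption_pending = true;   // unsafe now, defer
            else { /* ...safe to switch immediately... */ }
    }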
libcfa/src/concurrency/thread.cfa
Updates the thread start-up call site: CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread) becomes __cfactx_start(main_p, get_coroutine(this), this, __cfactx_invoke_thread).