Changeset 3b56166 for libcfa/src/concurrency
- Timestamp:
- Feb 10, 2020, 11:17:38 AM (6 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 3966d9a, 41efd33
- Parents:
- 807a632 (diff), d231700 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- libcfa/src/concurrency
- Files:
- 16 edited
Legend:
- Unmodified (context) lines: no prefix
- Added lines: prefixed with +
- Removed lines: prefixed with -
libcfa/src/concurrency/CtxSwitch-x86_64.S
r807a632 → r3b56166

@@ -87 +87 @@
  CtxInvokeStub:
      movq %rbx, %rdi
-     jmp *%r12
+     movq %r12, %rsi
+     jmp *%r13
  .size CtxInvokeStub, .-CtxInvokeStub
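The stub now loads a second argument register before jumping through %r13. A minimal C sketch of what the new stub amounts to, assuming %rbx, %r12 and %r13 hold the three values that CtxStart stores into fixedRegisters[0..2] (that mapping is an inference from invoke.c below, not something the assembly states):

    // Sketch only, System V AMD64 convention: %rdi/%rsi carry the first two arguments.
    void CtxInvokeStub_sketch( void (*main)(void *),                        // restored from %rbx
                               void * this,                                 // restored from %r12
                               void (*invoke)(void (*)(void *), void *) ) { // restored from %r13
        invoke( main, this );   // movq %rbx,%rdi ; movq %r12,%rsi ; jmp *%r13
    }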
libcfa/src/concurrency/alarm.cfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Fri Jun  2 11:31:25 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri May 25 06:25:47 2018
- // Update Count     : 67
+ // Last Modified On : Sun Jan  5 08:41:36 2020
+ // Update Count     : 69
  //

@@ -39 +39 @@

  void __kernel_set_timer( Duration alarm ) {
-     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
-     setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
+     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm`ns);
+     setitimer( ITIMER_REAL, &(itimerval){ alarm }, 0p );
  }

@@ -113 +113 @@
      this->tail = &this->head;
  }
- head->next = NULL;
+ head->next = 0p;
  }
  verify( validate( this ) );

@@ -127 +127 @@
      this->tail = it;
  }
- n->next = NULL;
+ n->next = 0p;

  verify( validate( this ) );
libcfa/src/concurrency/coroutine.cfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Mon Nov 28 12:27:26 2016
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Mar 30 17:20:57 2018
- // Update Count     : 9
+ // Last Modified On : Tue Feb  4 12:29:25 2020
+ // Update Count     : 16
  //

@@ -89 +89 @@
  }

- void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize ) with( this ) {
-     (this.context){ NULL, NULL };
+ void ?{}( coroutine_desc & this, const char name[], void * storage, size_t storageSize ) with( this ) {
+     (this.context){0p, 0p};
      (this.stack){storage, storageSize};
      this.name = name;
      state = Start;
-     starter = NULL;
-     last = NULL;
-     cancellation = NULL;
+     starter = 0p;
+     last = 0p;
+     cancellation = 0p;
  }

@@ -131 +131 @@

  [void *, size_t] __stack_alloc( size_t storageSize ) {
-     static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
+     const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
      assert(__page_size != 0l);
      size_t size = libCeiling( storageSize, 16 ) + stack_data_size;

@@ -157 +157 @@

  void __stack_prepare( __stack_info_t * this, size_t create_size ) {
-     static const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
+     const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
      bool userStack;
      void * storage;

@@ -187 +187 @@
  // is not inline (We can't inline Cforall in C)
  extern "C" {
-     void __suspend_internal(void) {
-         suspend();
-     }
-
-     void __leave_coroutine( coroutine_desc * src ) {
+     void __leave_coroutine( struct coroutine_desc * src ) {
          coroutine_desc * starter = src->cancellation != 0 ? src->last : src->starter;

@@ -207 +203 @@
          CoroutineCtxSwitch( src, starter );
      }
+
+     struct coroutine_desc * __finish_coroutine(void) {
+         struct coroutine_desc * cor = kernelTLS.this_thread->curr_cor;
+
+         if(cor->state == Primed) {
+             suspend();
+         }
+
+         cor->state = Active;
+
+         return cor;
+     }
  }

libcfa/src/concurrency/coroutine.hfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Mon Nov 28 12:27:26 2016
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Jun 21 17:49:39 2019
- // Update Count     : 9
+ // Last Modified On : Tue Feb  4 12:29:26 2020
+ // Update Count     : 11
  //

@@ -35 +35 @@
  // void ^?{}( coStack_t & this );

- void ?{}( coroutine_desc & this, const char * name, void * storage, size_t storageSize );
+ void ?{}( coroutine_desc & this, const char name[], void * storage, size_t storageSize );
  void ^?{}( coroutine_desc & this );

- static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", NULL, 0 }; }
- static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", NULL, stackSize }; }
+ static inline void ?{}( coroutine_desc & this) { this{ "Anonymous Coroutine", 0p, 0 }; }
+ static inline void ?{}( coroutine_desc & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; }
  static inline void ?{}( coroutine_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }
- static inline void ?{}( coroutine_desc & this, const char * name) { this{ name, NULL, 0 }; }
- static inline void ?{}( coroutine_desc & this, const char * name, size_t stackSize ) { this{ name, NULL, stackSize }; }
+ static inline void ?{}( coroutine_desc & this, const char name[]) { this{ name, 0p, 0 }; }
+ static inline void ?{}( coroutine_desc & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }

  //-----------------------------------------------------------------------------

@@ -61 +61 @@
  // Start coroutine routines
  extern "C" {
-     forall(dtype T | is_coroutine(T))
-     void CtxInvokeCoroutine(T * this);
+     void CtxInvokeCoroutine(void (*main)(void *), void * this);

-     forall(dtype T)
-     void CtxStart(T * this, void (*invoke)(T *));
+     forall(dtype T)
+     void CtxStart(void (*main)(T &), struct coroutine_desc * cor, T & this, void (*invoke)(void (*main)(void *), void *));

      extern void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine_desc *) __attribute__ ((__noreturn__));

@@ -89 +88 @@
  src->state = Active;

- if( unlikely(src->cancellation != NULL) ) {
+ if( unlikely(src->cancellation != 0p) ) {
      _CtxCoroutine_Unwind(src->cancellation, src);
  }

@@ -128 +127 @@
  coroutine_desc * dst = get_coroutine(cor);

- if( unlikely(dst->context.SP == NULL) ) {
+ if( unlikely(dst->context.SP == 0p) ) {
+     TL_GET( this_thread )->curr_cor = dst;
      __stack_prepare(&dst->stack, 65000);
-     CtxStart(&cor, CtxInvokeCoroutine);
+     CtxStart(main, dst, cor, CtxInvokeCoroutine);
+     TL_GET( this_thread )->curr_cor = src;
  }

libcfa/src/concurrency/invoke.c
r807a632 → r3b56166

@@ -29 +29 @@
  // Called from the kernel when starting a coroutine or task so must switch back to user mode.

- extern void __suspend_internal(void);
- extern void __leave_coroutine( struct coroutine_desc * );
- extern void __finish_creation( struct thread_desc * );
- extern void __leave_thread_monitor( struct thread_desc * this );
+ extern void __leave_coroutine ( struct coroutine_desc * );
+ extern struct coroutine_desc * __finish_coroutine(void);
+ extern void __leave_thread_monitor();
  extern void disable_interrupts() OPTIONAL_THREAD;
  extern void enable_interrupts( __cfaabi_dbg_ctx_param );

@@ -38 +37 @@
  void CtxInvokeCoroutine(
      void (*main)(void *),
-     struct coroutine_desc *(*get_coroutine)(void *),
      void *this
  ) {
-     struct coroutine_desc* cor = get_coroutine( this );
+     // Finish setting up the coroutine by setting its state
+     struct coroutine_desc * cor = __finish_coroutine();

-     if(cor->state == Primed) {
-         __suspend_internal();
-     }
-
-     cor->state = Active;
-
+     // Call the main of the coroutine
      main( this );

@@ -83 +77 @@

  void CtxInvokeThread(
-     void (*dtor)(void *),
      void (*main)(void *),
-     struct thread_desc *(*get_thread)(void *),
      void *this
  ) {
-     // Fetch the thread handle from the user defined thread structure
-     struct thread_desc* thrd = get_thread( this );
-
-     // First suspend, once the thread arrives here,
-     // the function pointer to main can be invalidated without risk
-     __finish_creation( thrd );
-
      // Officially start the thread by enabling preemption
      enable_interrupts( __cfaabi_dbg_ctx );

@@ -108 +93 @@
      // The order of these 4 operations is very important
      //Final suspend, should never return
-     __leave_thread_monitor( thrd );
+     __leave_thread_monitor();
      __cabi_abort( "Resumed dead thread" );
  }

-
  void CtxStart(
      void (*main)(void *),
-     struct coroutine_desc * (*get_coroutine)(void *),
+     struct coroutine_desc * cor,
      void *this,
      void (*invoke)(void *)
  ) {
-     struct coroutine_desc * cor = get_coroutine( this );
      struct __stack_t * stack = cor->stack.storage;

@@ -138 +121 @@

      fs->dummyReturn = NULL;
-     fs->argument[0] = this;      // argument to invoke
+     fs->argument[0] = main;      // argument to invoke
+     fs->argument[1] = this;      // argument to invoke
      fs->rturn = invoke;

@@ -156 +140 @@
      fs->dummyReturn = NULL;
      fs->rturn = CtxInvokeStub;
-     fs->fixedRegisters[0] = this;
-     fs->fixedRegisters[1] = invoke;
+     fs->fixedRegisters[0] = main;
+     fs->fixedRegisters[1] = this;
+     fs->fixedRegisters[2] = invoke;

  #elif defined( __ARM_ARCH )
  #error ARM needs to be upgrade to use to parameters like X86/X64 (A.K.A. : I broke this and do not know how to fix it)
      struct FakeStack {
          float fpRegs[16];        // floating point registers
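The net effect of these signature changes is a simpler startup protocol: the stub passes the user main and the object directly, and the descriptor is no longer recovered through a per-type get_coroutine/get_thread callback but through kernel TLS. A side-by-side sketch of the coroutine entry points, taken from the declarations in this diff (the "before" form is the C spelling used in invoke.c, not the polymorphic Cforall declaration):

    // before: descriptor recovered through a callback supplied per type
    void CtxInvokeCoroutine( void (*main)(void *),
                             struct coroutine_desc *(*get_coroutine)(void *),
                             void *this );

    // after: main and the object come through CtxInvokeStub; the descriptor
    // comes from kernelTLS inside __finish_coroutine()
    void CtxInvokeCoroutine( void (*main)(void *), void *this );
    struct coroutine_desc * __finish_coroutine(void);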
libcfa/src/concurrency/invoke.h
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Tue Jan 17 12:27:26 2016
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Sat Jun 22 18:19:13 2019
- // Update Count     : 40
+ // Last Modified On : Thu Dec  5 16:26:03 2019
+ // Update Count     : 44
  //

@@ -46 +46 @@
  #ifdef __cforall
  extern "Cforall" {
-     extern thread_local struct KernelThreadData {
+     extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
          struct thread_desc * volatile this_thread;
          struct processor * volatile this_processor;

@@ -55 +55 @@
          volatile bool in_progress;
      } preemption_state;
+
+     uint32_t rand_seed;
  } kernelTLS __attribute__ ((tls_model ( "initial-exec" )));
  }

@@ -205 +207 @@

  static inline void ?{}(__monitor_group_t & this) {
-     (this.data){ NULL };
+     (this.data){0p};
      (this.size){0};
      (this.func){NULL};
libcfa/src/concurrency/kernel.cfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Tue Jan 17 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Thu Jun 20 17:21:23 2019
- // Update Count     : 25
+ // Last Modified On : Tue Feb  4 13:03:15 2020
+ // Update Count     : 58
  //

@@ -26 +26 @@
  #include <signal.h>
  #include <unistd.h>
+ #include <limits.h>    // PTHREAD_STACK_MIN
+ #include <sys/mman.h>  // mprotect
  }

@@ -40 +42 @@
  //-----------------------------------------------------------------------------
  // Some assembly required
- #if
+ #if defined( __i386 )
  #define CtxGet( ctx ) \
      __asm__ volatile ( \

@@ -123 +125 @@

  extern "C" {
-     struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
+     struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
  }

@@ -131 +133 @@
  // Global state
  thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = {
+     NULL,                  // cannot use 0p
      NULL,
-     NULL,
-     { 1, false, false }
+     { 1, false, false },
+     6u //this should be seeded better but due to a bug calling rdtsc doesn't work
  };

@@ -139 +142 @@
  // Struct to steal stack
  struct current_stack_info_t {
-     __stack_t * storage;  // pointer to stack object
-     void * base;          // base of stack
-     void * limit;         // stack grows towards stack limit
-     void * context;       // address of cfa_context_t
+     __stack_t * storage;  // pointer to stack object
+     void * base;          // base of stack
+     void * limit;         // stack grows towards stack limit
+     void * context;       // address of cfa_context_t
  };

@@ -171 +174 @@
      name = "Main Thread";
      state = Start;
-     starter = NULL;
-     last = NULL;
-     cancellation = NULL;
+     starter = 0p;
+     last = 0p;
+     cancellation = 0p;
  }

@@ -184 +187 @@
      self_mon.recursion = 1;
      self_mon_p = &self_mon;
-     next = NULL;
-
-     node.next = NULL;
-     node.prev = NULL;
+     next = 0p;
+
+     node.next = 0p;
+     node.prev = 0p;
      doregister(curr_cluster, this);

@@ -206 +209 @@

  static void start(processor * this);
- void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
+ void ?{}(processor & this, const char name[], cluster & cltr) with( this ) {
      this.name = name;
      this.cltr = &cltr;
      terminated{ 0 };
      do_terminate = false;
-     preemption_alarm = NULL;
+     preemption_alarm = 0p;
      pending_preemption = false;
      runner.proc = &this;

@@ -231 +234 @@
  }

-     pthread_join( kernel_thread, NULL );
- }
-
- void ?{}(cluster & this, const char * name, Duration preemption_rate) with( this ) {
+     pthread_join( kernel_thread, 0p );
+     free( this.stack );
+ }
+
+ void ?{}(cluster & this, const char name[], Duration preemption_rate) with( this ) {
      this.name = name;
      this.preemption_rate = preemption_rate;

@@ -260 +264 @@
  //Main of the processor contexts
  void main(processorCtx_t & runner) {
+     // Because of a bug, we couldn't initialized the seed on construction
+     // Do it here
+     kernelTLS.rand_seed ^= rdtscl();
+
      processor * this = runner.proc;
      verify(this);

@@ -273 +281 @@
      __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

-     thread_desc * readyThread = NULL;
-     for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
-     {
+     thread_desc * readyThread = 0p;
+     for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
          readyThread = nextThread( this->cltr );

-         if(readyThread)
-         {
+         if(readyThread) {
              verify( ! kernelTLS.preemption_state.enabled );

@@ -290 +296 @@

              spin_count = 0;
-         }
-         else
-         {
+         } else {
              // spin(this, &spin_count);
              halt(this);

@@ -405 +409 @@
      processor * proc = (processor *) arg;
      kernelTLS.this_processor = proc;
-     kernelTLS.this_thread = NULL;
+     kernelTLS.this_thread = 0p;
      kernelTLS.preemption_state.[enabled, disable_count] = [false, 1];
      // SKULLDUGGERY: We want to create a context for the processor coroutine

@@ -418 +422 @@

      //Set global state
-     kernelTLS.this_thread = NULL;
+     kernelTLS.this_thread = 0p;

      //We now have a proper context from which to schedule threads

@@ -434 +438 @@
      __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner);

-     return NULL;
+     return 0p;
+ }
+
+ static void Abort( int ret, const char func[] ) {
+     if ( ret ) {                                    // pthread routines return errno values
+         abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) );
+     } // if
+ } // Abort
+
+ void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
+     pthread_attr_t attr;
+
+     Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute
+
+     size_t stacksize;
+     // default stack size, normally defined by shell limit
+     Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" );
+     assert( stacksize >= PTHREAD_STACK_MIN );
+
+     void * stack;
+     __cfaabi_dbg_debug_do(
+         stack = memalign( __page_size, stacksize + __page_size );
+         // pthread has no mechanism to create the guard page in user supplied stack.
+         if ( mprotect( stack, __page_size, PROT_NONE ) == -1 ) {
+             abort( "mprotect : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
+         } // if
+     );
+     __cfaabi_dbg_no_debug_do(
+         stack = malloc( stacksize );
+     );
+
+     Abort( pthread_attr_setstack( &attr, stack, stacksize ), "pthread_attr_setstack" );
+
+     Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" );
+     return stack;
  }

@@ -440 +478 @@
      __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);

-     pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this );
+     this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );

      __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);

@@ -452 +490 @@
      verify( ! kernelTLS.preemption_state.enabled );

+     kernelTLS.this_thread->curr_cor = dst;
      __stack_prepare( &dst->stack, 65000 );
-     CtxStart( &this->runner, CtxInvokeCoroutine);
+     CtxStart(main, dst, this->runner, CtxInvokeCoroutine);

      verify( ! kernelTLS.preemption_state.enabled );

@@ -468 +507 @@
      // when CtxSwitch returns we are back in the src coroutine

+     mainThread->curr_cor = &mainThread->self_cor;
+
      // set state of new coroutine to active
      src->state = Active;

@@ -497 +538 @@
      verify( ! kernelTLS.preemption_state.enabled );

-     verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );
+     verifyf( thrd->next == 0p, "Expected null got %p", thrd->next );

      with( *thrd->curr_cluster ) {

@@ -676 +717 @@
  void ?{}(processorCtx_t & this, processor * proc) {
      (this.__cor){ "Processor" };
-     this.__cor.starter = NULL;
+     this.__cor.starter = 0p;
      this.proc = proc;
  }

@@ -685 +726 @@
      terminated{ 0 };
      do_terminate = false;
-     preemption_alarm = NULL;
+     preemption_alarm = 0p;
      pending_preemption = false;
      kernel_thread = pthread_self();

@@ -803 +844 @@
      sigemptyset( &mask );
      sigaddset( &mask, SIGALRM );      // block SIGALRM signals
-     sigsuspend( &mask );              // block the processor to prevent further damage during abort
-     _exit( EXIT_FAILURE );            // if processor unblocks before it is killed, terminate it
+     sigaddset( &mask, SIGUSR1 );      // block SIGALRM signals
+     sigsuspend( &mask );              // block the processor to prevent further damage during abort
+     _exit( EXIT_FAILURE );            // if processor unblocks before it is killed, terminate it
  }
  else {

@@ -819 +861 @@
  if(thrd) {
      int len = snprintf( abort_text, abort_text_size, "Error occurred while executing thread %.256s (%p)", thrd->self_cor.name, thrd );
-     __cfaabi_dbg_bits_write( abort_text, len );
+     __cfaabi_bits_write( STDERR_FILENO, abort_text, len );

      if ( &thrd->self_cor != thrd->curr_cor ) {
          len = snprintf( abort_text, abort_text_size, " in coroutine %.256s (%p).\n", thrd->curr_cor->name, thrd->curr_cor );
-         __cfaabi_dbg_bits_write( abort_text, len );
+         __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
      }
      else {
-         __cfaabi_dbg_bits_write( ".\n", 2 );
+         __cfaabi_bits_write( STDERR_FILENO, ".\n", 2 );
      }
  }
  else {
      int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
-     __cfaabi_dbg_bits_write( abort_text, len );
+     __cfaabi_bits_write( STDERR_FILENO, abort_text, len );
  }
  }

@@ -842 +884 @@

  extern "C" {
-     void __cfaabi_dbg_bits_acquire() {
+     void __cfaabi_bits_acquire() {
          lock( kernel_debug_lock __cfaabi_dbg_ctx2 );
      }

-     void __cfaabi_dbg_bits_release() {
+     void __cfaabi_bits_release() {
          unlock( kernel_debug_lock );
      }

@@ -879 +921 @@

  void V(semaphore & this) with( this ) {
-     thread_desc * thrd = NULL;
+     thread_desc * thrd = 0p;
      lock( lock __cfaabi_dbg_ctx2 );
      count += 1;

@@ -939 +981 @@
  __cfaabi_dbg_debug_do(
      extern "C" {
-         void __cfaabi_dbg_record(__spinlock_t & this, const char * prev_name) {
+         void __cfaabi_dbg_record(__spinlock_t & this, const char prev_name[]) {
              this.prev_name = prev_name;
              this.prev_thrd = kernelTLS.this_thread;
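create_pthread() centralizes kernel-thread creation so the caller owns the stack: in debug builds the stack is memalign'd with an mprotect'd guard page, otherwise it is malloc'd, and either way it can be freed after pthread_join. A hypothetical usage sketch mirroring the processor start/stop paths in this diff (tid and stack are illustrative names, not part of the changeset):

    pthread_t tid;
    void * stack = create_pthread( &tid, CtxInvokeProcessor, (void *)this ); // caller keeps the stack pointer
    // ... run ...
    pthread_join( tid, 0p );
    free( stack );                     // safe: the stack was allocated by create_pthread itself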
libcfa/src/concurrency/kernel.hfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Tue Jan 17 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Sat Jun 22 11:39:17 2019
- // Update Count     : 16
+ // Last Modified On : Tue Feb  4 12:29:26 2020
+ // Update Count     : 22
  //

@@ -20 +20 @@
  #include "invoke.h"
  #include "time_t.hfa"
+ #include "coroutine.hfa"

  extern "C" {

@@ -88 +89 @@
  static inline void ?{}(FinishAction & this) {
      this.action_code = No_Action;
-     this.thrd = NULL;
-     this.lock = NULL;
+     this.thrd = 0p;
+     this.lock = 0p;
  }
  static inline void ^?{}(FinishAction &) {}

@@ -134 +135 @@
      semaphore terminated;

+     // pthread Stack
+     void * stack;
+
      // Link lists fields
      struct __dbg_node_proc {

@@ -146 +150 @@
  };

- void ?{}(processor & this, const char * name, struct cluster & cltr);
+ void ?{}(processor & this, const char name[], struct cluster & cltr);
  void ^?{}(processor & this);

  static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
  static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
- static inline void ?{}(processor & this, const char * name) { this{name, *mainCluster }; }
+ static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

  static inline [processor *&, processor *& ] __get( processor & this ) {

@@ -191 +195 @@
  extern Duration default_preemption();

- void ?{} (cluster & this, const char * name, Duration preemption_rate);
+ void ?{} (cluster & this, const char name[], Duration preemption_rate);
  void ^?{}(cluster & this);

  static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption()}; }
  static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate}; }
- static inline void ?{} (cluster & this, const char * name) { this{name, default_preemption()}; }
+ static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption()}; }

  static inline [cluster *&, cluster *& ] __get( cluster & this ) {
libcfa/src/concurrency/kernel_private.hfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Mon Feb 13 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Thu Mar 29 14:06:40 2018
- // Update Count     : 3
+ // Last Modified On : Sat Nov 30 19:25:02 2019
+ // Update Count     : 8
  //

@@ -57 +57 @@
  void main(processorCtx_t *);

+ void * create_pthread( pthread_t *, void * (*)(void *), void * );
+
  static inline void wake_fast(processor * this) {
      __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);

@@ -86 +88 @@
  // Threads
  extern "C" {
-     forall(dtype T | is_thread(T))
-     void CtxInvokeThread(T * this);
+     void CtxInvokeThread(void (*main)(void *), void * this);
  }

@@ -100 +101 @@
  // Utils
  #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
+
+ static inline uint32_t tls_rand() {
+     kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
+     kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
+     kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
+     return kernelTLS.rand_seed;
+ }

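tls_rand() is a per-kernel-thread xorshift generator (shift triple 6/21/7) over kernelTLS.rand_seed, so drawing a random number never takes a lock; the seed is later mixed with rdtscl() in kernel.cfa because, per the comment there, it could not be seeded properly at construction. A hypothetical usage sketch (count and victim are illustrative names, not part of the changeset):

    // pick a victim index without synchronization; each kernel thread owns its own seed
    unsigned victim = tls_rand() % count;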
libcfa/src/concurrency/monitor.cfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Thd Feb 23 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Mar 30 14:30:26 2018
- // Update Count     : 9
+ // Last Modified On : Wed Dec  4 07:55:14 2019
+ // Update Count     : 10
  //

@@ -243 +243 @@
  // last routine called by a thread.
  // Should never return
- void __leave_thread_monitor( thread_desc * thrd ) {
+ void __leave_thread_monitor() {
+     thread_desc * thrd = TL_GET( this_thread );
      monitor_desc * this = &thrd->self_mon;

@@ -363 +364 @@
      this.waiting_thread = waiting_thread;
      this.count = count;
-     this.next = NULL;
+     this.next = 0p;
      this.user_info = user_info;
  }

  void ?{}(__condition_criterion_t & this ) with( this ) {
      ready = false;
-     target = NULL;
-     owner = NULL;
-     next = NULL;
+     target = 0p;
+     owner = 0p;
+     next = 0p;
  }

@@ -378 +379 @@
      this.target = target;
      this.owner = &owner;
-     this.next = NULL;
+     this.next = 0p;
  }

@@ -387 +388 @@

  // Check that everything is as expected
- assertf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
+ assertf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors );
  verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );
  verifyf( this.monitor_count < 32u, "Excessive monitor count (%"PRIiFAST16")", this.monitor_count );

@@ -449 +450 @@

  // Lock all monitors
- lock_all( this.monitors, NULL, count );
+ lock_all( this.monitors, 0p, count );

  //Pop the head of the waiting queue

@@ -471 +472 @@

  //Check that everything is as expected
- verifyf( this.monitors != NULL, "Waiting with no monitors (%p)", this.monitors );
+ verifyf( this.monitors != 0p, "Waiting with no monitors (%p)", this.monitors );
  verifyf( this.monitor_count != 0, "Waiting with 0 monitors (%"PRIiFAST16")", this.monitor_count );

@@ -674 +675 @@

  static inline void reset_mask( monitor_desc * this ) {
-     this->mask.accepted = NULL;
-     this->mask.data = NULL;
+     this->mask.accepted = 0p;
+     this->mask.data = 0p;
      this->mask.size = 0;
  }

@@ -816 +817 @@
  }

- __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : NULL );
- return ready2run ? node->waiting_thread : NULL;
+ __cfaabi_dbg_print_safe( "Kernel : Runing %i (%p)\n", ready2run, ready2run ? node->waiting_thread : 0p );
+ return ready2run ? node->waiting_thread : 0p;
  }

@@ -824 +825 @@
  if( !this.monitors ) {
      // __cfaabi_dbg_print_safe( "Branding\n" );
-     assertf( thrd->monitors.data != NULL, "No current monitor to brand condition %p", thrd->monitors.data );
+     assertf( thrd->monitors.data != 0p, "No current monitor to brand condition %p", thrd->monitors.data );
      this.monitor_count = thrd->monitors.size;

libcfa/src/concurrency/monitor.hfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Thd Feb 23 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Sat Oct  7 18:06:45 2017
- // Update Count     : 10
+ // Last Modified On : Wed Dec  4 07:55:32 2019
+ // Update Count     : 11
  //

@@ -31 +31 @@
      entry_queue{};
      signal_stack{};
-     owner = NULL;
+     owner = 0p;
      recursion = 0;
-     mask.accepted = NULL;
-     mask.data = NULL;
+     mask.accepted = 0p;
+     mask.data = 0p;
      mask.size = 0;
-     dtor_node = NULL;
+     dtor_node = 0p;
  }
+
+ static inline void ^?{}(monitor_desc & ) {}

  struct monitor_guard_t {

@@ -120 +122 @@

  static inline void ?{}( condition & this ) {
-     this.monitors = NULL;
+     this.monitors = 0p;
      this.monitor_count = 0;
  }
libcfa/src/concurrency/mutex.cfa
r807a632 → r3b56166

@@ -11 +11 @@
  // Author           : Thierry Delisle
  // Created On       : Fri May 25 01:37:11 2018
- // Last Modified By : Thierry Delisle
- // Last Modified On : Fri May 25 01:37:51 2018
- // Update Count     : 0
+ // Last Modified By : Peter A. Buhr
+ // Last Modified On : Wed Dec  4 09:16:39 2019
+ // Update Count     : 1
  //

@@ -73 +73 @@
      this.lock{};
      this.blocked_threads{};
-     this.owner = NULL;
+     this.owner = 0p;
      this.recursion_count = 0;
  }

@@ -83 +83 @@
  void lock(recursive_mutex_lock & this) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
-     if( owner == NULL ) {
+     if( owner == 0p ) {
          owner = kernelTLS.this_thread;
          recursion_count = 1;

@@ -101 +101 @@
      bool ret = false;
      lock( lock __cfaabi_dbg_ctx2 );
-     if( owner == NULL ) {
+     if( owner == 0p ) {
          owner = kernelTLS.this_thread;
          recursion_count = 1;
libcfa/src/concurrency/mutex.hfa
r807a632 → r3b56166

@@ -11 +11 @@
  // Author           : Thierry Delisle
  // Created On       : Fri May 25 01:24:09 2018
- // Last Modified By : Thierry Delisle
- // Last Modified On : Fri May 25 01:24:12 2018
- // Update Count     : 0
+ // Last Modified By : Peter A. Buhr
+ // Last Modified On : Wed Dec  4 09:16:53 2019
+ // Update Count     : 1
  //

@@ -110 +110 @@

  static inline void ?{}(lock_scope(L) & this) {
-     this.locks = NULL;
+     this.locks = 0p;
      this.count = 0;
  }
libcfa/src/concurrency/preemption.cfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Mon Jun  5 14:20:42 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Tue Jun  5 17:35:49 2018
- // Update Count     : 37
+ // Last Modified On : Thu Dec  5 16:34:05 2019
+ // Update Count     : 43
  //

@@ -24 +24 @@
  #include <string.h>
  #include <unistd.h>
+ #include <limits.h>    // PTHREAD_STACK_MIN
  }

@@ -64 +65 @@
  event_kernel_t * event_kernel;      // kernel public handle to even kernel
  static pthread_t alarm_thread;      // pthread handle to alarm thread
+ static void * alarm_stack;          // pthread stack for alarm thread

  static void ?{}(event_kernel_t & this) with( this ) {

@@ -81 +83 @@
  // Get next expired node
  static inline alarm_node_t * get_expired( alarm_list_t * alarms, Time currtime ) {
-     if( !alarms->head ) return NULL;                    // If no alarms return null
-     if( alarms->head->alarm >= currtime ) return NULL;  // If alarms head not expired return null
-     return pop(alarms);
+     if( !alarms->head ) return 0p;                      // If no alarms return null
+     if( alarms->head->alarm >= currtime ) return 0p;    // If alarms head not expired return null
+     return pop(alarms);                                 // Otherwise just pop head
  }

  // Tick one frame of the Discrete Event Simulation for alarms
  static void tick_preemption() {
-     alarm_node_t * node = NULL;                    // Used in the while loop but cannot be declared in the while condition
-     alarm_list_t * alarms = &event_kernel->alarms;
-     Time currtime = __kernel_get_time();           // Check current time once so we everything "happens at once"
+     alarm_node_t * node = 0p;                      // Used in the while loop but cannot be declared in the while condition
+     alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading
+     Time currtime = __kernel_get_time();           // Check current time once so everything "happens at once"

      //Loop throught every thing expired

@@ -243 +245 @@
  sigaddset( &mask, sig );

- if ( pthread_sigmask( SIG_UNBLOCK, &mask, NULL ) == -1 ) {
+ if ( pthread_sigmask( SIG_UNBLOCK, &mask, 0p ) == -1 ) {
      abort( "internal error, pthread_sigmask" );
  }

@@ -254 +256 @@
  sigaddset( &mask, sig );

- if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
+ if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
      abort( "internal error, pthread_sigmask" );
  }

@@ -301 +303 @@

  // Setup proper signal handlers
- __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART );
+ __cfaabi_sigaction( SIGUSR1, sigHandler_ctxSwitch, SA_SIGINFO | SA_RESTART ); // CtxSwitch handler

  signal_block( SIGALRM );

- pthread_create( &alarm_thread, NULL, alarm_loop, NULL );
+ alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
  }

@@ -316 +318 @@
  sigset_t mask;
  sigfillset( &mask );
- sigprocmask( SIG_BLOCK, &mask, NULL );
+ sigprocmask( SIG_BLOCK, &mask, 0p );

  // Notify the alarm thread of the shutdown

@@ -323 +325 @@

  // Wait for the preemption thread to finish
- pthread_join( alarm_thread, NULL );
+
+ pthread_join( alarm_thread, 0p );
+ free( alarm_stack );

  // Preemption is now fully stopped

@@ -380 +384 @@
  static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
  #endif
- if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
+ if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), 0p ) == -1 ) {
      abort( "internal error, sigprocmask" );
  }

@@ -399 +403 @@
  sigset_t mask;
  sigfillset(&mask);
- if ( pthread_sigmask( SIG_BLOCK, &mask, NULL ) == -1 ) {
+ if ( pthread_sigmask( SIG_BLOCK, &mask, 0p ) == -1 ) {
      abort( "internal error, pthread_sigmask" );
  }

@@ -420 +424 @@
  {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
  continue;
  case EINVAL :
      abort( "Timeout was invalid." );
  default:

@@ -453 +457 @@
  EXIT:
  __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
- return NULL;
+ return 0p;
  }

@@ -466 +470 @@
  sigset_t oldset;
  int ret;
- ret = pthread_sigmask(0, NULL, &oldset);
+ ret = pthread_sigmask(0, 0p, &oldset);
  if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }
libcfa/src/concurrency/thread.cfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Tue Jan 17 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Mar 30 17:19:52 2018
- // Update Count     : 8
+ // Last Modified On : Wed Dec  4 09:17:49 2019
+ // Update Count     : 9
  //

@@ -33 +33 @@
  // Thread ctors and dtors
  void ?{}(thread_desc & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
-     context{ NULL, NULL };
+     context{ 0p, 0p };
      self_cor{ name, storage, storageSize };
      state = Start;

@@ -41 +41 @@
      self_mon_p = &self_mon;
      curr_cluster = &cl;
-     next = NULL;
+     next = 0p;

-     node.next = NULL;
-     node.prev = NULL;
+     node.next = 0p;
+     node.prev = 0p;
      doregister(curr_cluster, this);

@@ -58 +58 @@
  void ?{}( scoped(T)& this ) with( this ) {
      handle{};
-     __thrd_start(handle);
+     __thrd_start(handle, main);
  }

@@ -64 +64 @@
  void ?{}( scoped(T)& this, P params ) with( this ) {
      handle{ params };
-     __thrd_start(handle);
+     __thrd_start(handle, main);
  }

@@ -75 +75 @@
  // Starting and stopping threads
  forall( dtype T | is_thread(T) )
- void __thrd_start( T & this ) {
+ void __thrd_start( T & this, void (*main_p)(T &) ) {
      thread_desc * this_thrd = get_thread(this);
      thread_desc * curr_thrd = TL_GET( this_thread );

      disable_interrupts();
-     CtxStart(&this, CtxInvokeThread);
+     CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
+
      this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
      verify( this_thrd->context.SP );
-     CtxSwitch( &curr_thrd->context, &this_thrd->context );
+     // CtxSwitch( &curr_thrd->context, &this_thrd->context );

      ScheduleThread(this_thrd);
      enable_interrupts( __cfaabi_dbg_ctx );
  }
-
- extern "C" {
-     // KERNEL ONLY
-     void __finish_creation(thread_desc * this) {
-         // set new coroutine that the processor is executing
-         // and context switch to it
-         verify( kernelTLS.this_thread != this );
-         verify( kernelTLS.this_thread->context.SP );
-         CtxSwitch( &this->context, &kernelTLS.this_thread->context );
-     }
- }

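Thread creation no longer context-switches into the new thread: the CtxSwitch call is commented out, __finish_creation is removed, and the creator simply publishes the prepared context and schedules it. Condensed from the new __thrd_start in this diff (sketch only; declarations and surrounding code omitted):

    disable_interrupts();
    CtxStart( main_p, get_coroutine(this), this, CtxInvokeThread );      // build the first (fake) frame
    this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];  // adopt the prepared stack
    ScheduleThread( this_thrd );                                         // runs when a processor dequeues it
    enable_interrupts( __cfaabi_dbg_ctx );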
libcfa/src/concurrency/thread.hfa
r807a632 → r3b56166

@@ -10 +10 @@
  // Created On       : Tue Jan 17 12:27:26 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Jun 21 17:51:33 2019
- // Update Count     : 5
+ // Last Modified On : Wed Dec  4 09:18:14 2019
+ // Update Count     : 6
  //

@@ -54 +54 @@

  forall( dtype T | is_thread(T) )
- void __thrd_start( T & this );
+ void __thrd_start( T & this, void (*)(T &) );

  //-----------------------------------------------------------------------------

@@ -61 +61 @@
  void ^?{}(thread_desc & this);

- static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, NULL, 65000 }; }
- static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, NULL, stackSize }; }
+ static inline void ?{}(thread_desc & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
+ static inline void ?{}(thread_desc & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
  static inline void ?{}(thread_desc & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
- static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, NULL, 65000 }; }
- static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, NULL, stackSize }; }
+ static inline void ?{}(thread_desc & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }
+ static inline void ?{}(thread_desc & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }
  static inline void ?{}(thread_desc & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
- static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, NULL, 65000 }; }
- static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, NULL, 65000 }; }
- static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, NULL, stackSize }; }
+ static inline void ?{}(thread_desc & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }
+ static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }
+ static inline void ?{}(thread_desc & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }

  //-----------------------------------------------------------------------------