- Timestamp:
- Dec 3, 2019, 6:17:58 PM (5 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 1d60da8, ee0bfa9
- Parents:
- e8c52cf (diff), 1805b1b (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- Location:
- libcfa/src/concurrency
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel.cfa
re8c52cf r3901457 10 10 // Created On : Tue Jan 17 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Fri Nov 29 17:59:16201913 // Update Count : 3512 // Last Modified On : Sun Dec 1 17:52:57 2019 13 // Update Count : 45 14 14 // 15 15 … … 41 41 //----------------------------------------------------------------------------- 42 42 // Some assembly required 43 #if 43 #if defined( __i386 ) 44 44 #define CtxGet( ctx ) \ 45 45 __asm__ volatile ( \ … … 124 124 125 125 extern "C" { 126 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;126 struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters; 127 127 } 128 128 … … 132 132 // Global state 133 133 thread_local struct KernelThreadData kernelTLS __attribute__ ((tls_model ( "initial-exec" ))) = { 134 NULL, 134 NULL, // cannot use 0p 135 135 NULL, 136 136 { NULL, 1, false, false }, … … 141 141 // Struct to steal stack 142 142 struct current_stack_info_t { 143 __stack_t * storage; // pointer to stack object144 void * base;// base of stack145 void * limit;// stack grows towards stack limit146 void * context;// address of cfa_context_t143 __stack_t * storage; // pointer to stack object 144 void * base; // base of stack 145 void * limit; // stack grows towards stack limit 146 void * context; // address of cfa_context_t 147 147 }; 148 148 … … 173 173 name = "Main Thread"; 174 174 state = Start; 175 starter = NULL;176 last = NULL;177 cancellation = NULL;175 starter = 0p; 176 last = 0p; 177 cancellation = 0p; 178 178 } 179 179 … … 186 186 self_mon.recursion = 1; 187 187 self_mon_p = &self_mon; 188 next = NULL;189 190 node.next = NULL;191 node.prev = NULL;188 next = 0p; 189 190 node.next = 0p; 191 node.prev = 0p; 192 192 doregister(curr_cluster, this); 193 193 … … 213 213 terminated{ 0 }; 214 214 do_terminate = false; 215 preemption_alarm = NULL;215 preemption_alarm = 0p; 216 216 pending_preemption = false; 217 217 runner.proc = &this; … … 233 233 
} 234 234 235 pthread_join( kernel_thread, NULL);235 pthread_join( kernel_thread, 0p ); 236 236 free( this.stack ); 237 237 } … … 280 280 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); 281 281 282 thread_desc * readyThread = NULL;282 thread_desc * readyThread = 0p; 283 283 for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) 284 284 { … … 412 412 processor * proc = (processor *) arg; 413 413 kernelTLS.this_processor = proc; 414 kernelTLS.this_thread = NULL;414 kernelTLS.this_thread = 0p; 415 415 kernelTLS.preemption_state.[enabled, disable_count] = [false, 1]; 416 416 // SKULLDUGGERY: We want to create a context for the processor coroutine … … 425 425 426 426 //Set global state 427 kernelTLS.this_thread = NULL;427 kernelTLS.this_thread = 0p; 428 428 429 429 //We now have a proper context from which to schedule threads … … 441 441 __cfaabi_dbg_print_safe("Kernel : core %p main ended (%p)\n", proc, &proc->runner); 442 442 443 return NULL; 443 return 0p; 444 } 445 446 static void Abort( int ret, const char * func ) { 447 if ( ret ) { 448 abort( "%s : internal error, error(%d) %s.", func, ret, strerror( ret ) ); 449 } // if 450 } // Abort 451 452 void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) { 453 pthread_attr_t attr; 454 455 Abort( pthread_attr_init( &attr ), "pthread_attr_init" ); // initialize attribute 456 457 #ifdef __CFA_DEBUG__ 458 size_t guardsize; 459 Abort( pthread_attr_getguardsize( &attr, &guardsize ), "pthread_attr_getguardsize" ); 460 Abort( pthread_attr_setguardsize( &attr, guardsize ), "pthread_attr_setguardsize" ); 461 #endif 462 463 size_t stacksize; 464 Abort( pthread_attr_getstacksize( &attr, &stacksize ), "pthread_attr_getstacksize" ); // default stack size, normally defined by shell limit 465 assert( stacksize >= PTHREAD_STACK_MIN ); 466 void * stack = malloc( stacksize ); 467 Abort( pthread_attr_setstack( &attr, stack, stacksize ), 
"pthread_attr_setstack" ); 468 469 Abort( pthread_create( pthread, &attr, start, arg ), "pthread_create" ); 470 return stack; 444 471 } 445 472 … … 447 474 __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this); 448 475 449 pthread_attr_t attr; 450 int ret; 451 ret = pthread_attr_init( &attr ); // initialize attribute 452 if ( ret ) { 453 abort( "%s : internal error, pthread_attr_init failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 454 } // if 455 456 size_t stacksize; 457 ret = pthread_attr_getstacksize( &attr, &stacksize ); // default stack size, normally defined by shell limit 458 if ( ret ) { 459 abort( "%s : internal error, pthread_attr_getstacksize failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 460 } // if 461 assert( stacksize >= PTHREAD_STACK_MIN ); 462 463 this->stack = malloc( stacksize ); 464 ret = pthread_attr_setstack( &attr, this->stack, stacksize ); 465 if ( ret ) { 466 abort( "%s : internal error, pthread_attr_setstack failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 467 } // if 468 469 ret = pthread_create( &this->kernel_thread, &attr, CtxInvokeProcessor, (void *)this ); 470 if ( ret ) { 471 abort( "%s : internal error, pthread_create failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 472 } // if 473 // pthread_create( &this->kernel_thread, NULL, CtxInvokeProcessor, (void*)this ); 476 this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this ); 474 477 475 478 __cfaabi_dbg_print_safe("Kernel : core %p started\n", this); … … 528 531 verify( ! 
kernelTLS.preemption_state.enabled ); 529 532 530 verifyf( thrd->next == NULL, "Expected null got %p", thrd->next );533 verifyf( thrd->next == 0p, "Expected null got %p", thrd->next ); 531 534 532 535 with( *thrd->curr_cluster ) { … … 707 710 void ?{}(processorCtx_t & this, processor * proc) { 708 711 (this.__cor){ "Processor" }; 709 this.__cor.starter = NULL;712 this.__cor.starter = 0p; 710 713 this.proc = proc; 711 714 } … … 716 719 terminated{ 0 }; 717 720 do_terminate = false; 718 preemption_alarm = NULL;721 preemption_alarm = 0p; 719 722 pending_preemption = false; 720 723 kernel_thread = pthread_self(); … … 910 913 911 914 void V(semaphore & this) with( this ) { 912 thread_desc * thrd = NULL;915 thread_desc * thrd = 0p; 913 916 lock( lock __cfaabi_dbg_ctx2 ); 914 917 count += 1; -
libcfa/src/concurrency/kernel_private.hfa
re8c52cf r3901457 10 10 // Created On : Mon Feb 13 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Mar 29 14:06:40 201813 // Update Count : 312 // Last Modified On : Sat Nov 30 19:25:02 2019 13 // Update Count : 8 14 14 // 15 15 … … 56 56 // Processor 57 57 void main(processorCtx_t *); 58 59 void * create_pthread( pthread_t *, void * (*)(void *), void * ); 58 60 59 61 static inline void wake_fast(processor * this) { -
libcfa/src/concurrency/preemption.cfa
re8c52cf r3901457 10 10 // Created On : Mon Jun 5 14:20:42 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : S at Nov 30 08:02:56 201913 // Update Count : 3912 // Last Modified On : Sun Dec 1 22:22:56 2019 13 // Update Count : 41 14 14 // 15 15 … … 306 306 signal_block( SIGALRM ); 307 307 308 pthread_attr_t attr; 309 int ret; 310 ret = pthread_attr_init( &attr ); // initialize attribute 311 if ( ret ) { 312 abort( "%s : internal error, pthread_attr_init failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 313 } // if 314 315 size_t stacksize; 316 ret = pthread_attr_getstacksize( &attr, &stacksize ); // default stack size, normally defined by shell limit 317 if ( ret ) { 318 abort( "%s : internal error, pthread_attr_getstacksize failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 319 } // if 320 assert( stacksize >= PTHREAD_STACK_MIN ); 321 322 kernelTLS.preemption_state.stack = malloc( stacksize ); 323 ret = pthread_attr_setstack( &attr, kernelTLS.preemption_state.stack, stacksize ); 324 if ( ret ) { 325 abort( "%s : internal error, pthread_attr_setstack failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 326 } // if 327 328 ret = pthread_create( &alarm_thread, &attr, alarm_loop, 0p ); 329 if ( ret ) { 330 abort( "%s : internal error, pthread_create failed, error(%d) %s.", __PRETTY_FUNCTION__, ret, strerror( ret ) ); 331 } // if 308 kernelTLS.preemption_state.stack = create_pthread( &alarm_thread, alarm_loop, 0p ); 332 309 } 333 310
Note: See TracChangeset for help on using the changeset viewer.