Changeset 8c50aed for libcfa/src
- Timestamp: Feb 21, 2020, 3:33:14 PM (5 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: a505021
- Parents: b0c7419
- Location: libcfa/src/concurrency
- Files: 8 edited
libcfa/src/concurrency/coroutine.hfa (rb0c7419 → r8c50aed)

@@ -54,5 +54,5 @@
 void prime(T & cor);

-static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+static inline struct coroutine_desc * active_coroutine() __attribute__((const)) { return TL_GET( this_thread )->curr_cor; }

 //-----------------------------------------------------------------------------
@@ -73,5 +73,5 @@
 // Private wrappers for context switch and stack creation
 // Wrapper for co
-static inline void CoroutineCtxSwitch( coroutine_desc* src, coroutine_desc* dst) {
+static inline void CoroutineCtxSwitch( coroutine_desc * src, coroutine_desc * dst ) __attribute__((nonnull (1, 2))) {
	// set state of current coroutine to inactive
	src->state = src->state == Halted ? Halted : Inactive;
@@ -152,5 +152,5 @@
 }

-static inline void resume( coroutine_desc * dst) {
+static inline void resume( coroutine_desc * dst ) __attribute__((nonnull (1))) {
	// optimization : read TLS once and reuse it
	// Safety note: this is preemption safe since if
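The recurring change in this header (and in the files below) is the addition of GCC function attributes. __attribute__((const)) tells the compiler that a call's result depends only on its arguments, so repeated calls may be folded into one; __attribute__((nonnull (1, 2))) declares that the listed pointer parameters must never be null, enabling call-site warnings and letting the optimizer drop redundant null checks. A minimal sketch in plain C, using hypothetical functions rather than the CFA runtime, of what these annotations promise:

#include <stdio.h>

/* const: the result depends only on the arguments, so the two calls below are
   candidates for being collapsed into a single call by the optimizer. */
static int square( int x ) __attribute__((const));
static int square( int x ) { return x * x; }

/* nonnull: argument 1 must never be null; gcc/clang diagnose a literal NULL at
   the call site (-Wnonnull) and may assume the pointer is valid inside. */
static void print_name( const char * name ) __attribute__((nonnull (1)));
static void print_name( const char * name ) { printf( "%s\n", name ); }

int main() {
	int y = square( 7 ) + square( 7 );   /* common-subexpression elimination applies */
	print_name( "cfa" );                 /* print_name( NULL ) would draw a warning */
	printf( "%d\n", y );
	return 0;
}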
libcfa/src/concurrency/invoke.h (rb0c7419 → r8c50aed)

@@ -200,9 +200,9 @@
 #ifdef __cforall
 extern "Cforall" {
-	static inline thread_desc *& get_next( thread_desc & this ) {
+	static inline thread_desc *& get_next( thread_desc & this ) __attribute__((const)) {
		return this.next;
	}

-	static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
+	static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) /*__attribute__((const))*/ {
		return this.node.[next, prev];
	}
@@ -220,5 +220,5 @@
	}

-	static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
+	static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
		if( (lhs.data != 0) != (rhs.data != 0) ) return false;
		if( lhs.size != rhs.size ) return false;
libcfa/src/concurrency/kernel.cfa (rb0c7419 → r8c50aed)

@@ -110,6 +110,6 @@
 //-----------------------------------------------------------------------------
 //Start and stop routine for the kernel, declared first to make sure they run first
-static void kernel_startup(void)  __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
-static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
+static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

 //-----------------------------------------------------------------------------
@@ -208,5 +208,6 @@
 }

-static void start(processor * this);
+static void * CtxInvokeProcessor(void * arg);
+
 void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
	this.name = name;
@@ -221,4 +222,8 @@
	idleLock{};

-	start( &this );
+	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
+
+	this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this );
+
+	__cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
 }
@@ -259,7 +264,7 @@
 // Kernel Scheduling logic
 //=============================================================================================
-static thread_desc * nextThread(cluster * this);
-static void runThread(processor * this, thread_desc * dst);
-static void halt(processor * this);
+static thread_desc * __next_thread(cluster * this);
+static void __run_thread(processor * this, thread_desc * dst);
+static void __halt(processor * this);

 //Main of the processor contexts
@@ -284,5 +289,5 @@
	thread_desc * readyThread = 0p;
	for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
-		readyThread = nextThread( this->cltr );
+		readyThread = __next_thread( this->cltr );

		if(readyThread) {
@@ -291,5 +296,5 @@
			/* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

-			runThread(this, readyThread);
+			__run_thread(this, readyThread);

			/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
@@ -298,5 +303,5 @@
		} else {
			// spin(this, &spin_count);
-			//halt(this);
+			__halt(this);
		}
	}
@@ -318,5 +323,5 @@
 // runThread runs a thread by context switching
 // from the processor coroutine to the target thread
-static void runThread(processor * this, thread_desc * thrd_dst) {
+static void __run_thread(processor * this, thread_desc * thrd_dst) {
	coroutine_desc * proc_cor = get_coroutine(this->runner);

@@ -359,5 +364,5 @@
		if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
			// The thread was preempted, reschedule it and reset the flag
-			ScheduleThread( thrd_dst );
+			__schedule_thread( thrd_dst );
			break RUNNING;
		}
@@ -460,5 +465,5 @@
 } // Abort

-void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
+void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
	pthread_attr_t attr;

@@ -488,14 +493,6 @@
 }

-static void start(processor * this) {
-	__cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
-
-	this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );
-
-	__cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
-}
-
 // KERNEL_ONLY
-void kernel_first_resume( processor * this ) {
+static void __kernel_first_resume( processor * this ) {
	thread_desc * src = mainThread;
	coroutine_desc * dst = get_coroutine(this->runner);
@@ -529,5 +526,5 @@

 // KERNEL_ONLY
-void kernel_last_resume( processor * this ) {
+static void __kernel_last_resume( processor * this ) {
	coroutine_desc * src = &mainThread->self_cor;
	coroutine_desc * dst = get_coroutine(this->runner);
@@ -544,5 +541,5 @@
 // Scheduler routines
 // KERNEL ONLY
-void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
+void __schedule_thread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
	/* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
@@ -574,5 +571,5 @@

 // KERNEL ONLY
-static thread_desc * nextThread(cluster * this) with( *this ) {
+static thread_desc * __next_thread(cluster * this) with( *this ) {
	/* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ -600,5 +597,5 @@
		// Wake lost the race,
		thrd->state = Inactive;
-		ScheduleThread( thrd );
+		__schedule_thread( thrd );
		break;
	case Rerun:
@@ -668,5 +665,5 @@
 //-----------------------------------------------------------------------------
 // Kernel boot procedures
-static void kernel_startup(void) {
+static void __kernel_startup(void) {
	verify( ! kernelTLS.preemption_state.enabled );
	__cfaabi_dbg_print_safe("Kernel : Starting\n");
@@ -729,8 +726,8 @@
	// Add the main thread to the ready queue
	// once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-	ScheduleThread(mainThread);
+	__schedule_thread(mainThread);

	// SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
	// context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
	// mainThread is on the ready queue when this call is made.
-	kernel_first_resume( kernelTLS.this_processor );
+	__kernel_first_resume( kernelTLS.this_processor );
@@ -746,5 +743,5 @@
 }

-static void kernel_shutdown(void) {
+static void __kernel_shutdown(void) {
	__cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");

@@ -757,5 +754,5 @@
	// which is currently here
	__atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-	kernel_last_resume( kernelTLS.this_processor );
+	__kernel_last_resume( kernelTLS.this_processor );
	mainThread->self_cor.state = Halted;

@@ -783,5 +780,5 @@
 // Kernel Quiescing
 //=============================================================================================
-static void halt(processor * this) with( *this ) {
+static void __halt(processor * this) with( *this ) {
	// verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );

@@ -972,5 +969,5 @@
 //-----------------------------------------------------------------------------
 // Debug
-bool threading_enabled(void) {
+bool threading_enabled(void) __attribute__((const)) {
	return true;
 }
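The processor constructor no longer goes through a separate start() helper; it creates its kernel thread directly with __create_pthread, which also hands back the stack it allocated so the processor can release it at shutdown. A rough sketch of that pattern with the plain pthreads API (the helper name, stack size, and omitted error handling are assumptions, not the CFA implementation):

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

/* Create a worker thread on a caller-owned stack and return the stack pointer
   so the caller can free it after joining the thread. Error handling omitted. */
static void * spawn_worker( pthread_t * tid, void * (*start)(void *), void * arg ) {
	size_t stacksize = 1 << 20;                         /* assumed 1 MiB stack */
	void * stack = NULL;
	posix_memalign( &stack, sysconf( _SC_PAGESIZE ), stacksize );

	pthread_attr_t attr;
	pthread_attr_init( &attr );
	pthread_attr_setstack( &attr, stack, stacksize );   /* run the thread on the supplied stack */

	pthread_create( tid, &attr, start, arg );
	pthread_attr_destroy( &attr );
	return stack;                                       /* caller frees this after pthread_join */
}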
libcfa/src/concurrency/kernel.hfa (rb0c7419 → r8c50aed)

@@ -108,7 +108,5 @@
 static inline void ?{}(processor & this, const char * name) { this{name, *mainCluster }; }

-static inline [processor *&, processor *& ] __get( processor & this ) {
-	return this.node.[next, prev];
-}
+static inline [processor *&, processor *& ] __get( processor & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }

 //-----------------------------------------------------------------------------
@@ -153,10 +151,8 @@
 static inline void ?{} (cluster & this, const char * name) { this{name, default_preemption()}; }

-static inline [cluster *&, cluster *& ] __get( cluster & this ) {
-	return this.node.[next, prev];
-}
+static inline [cluster *&, cluster *& ] __get( cluster & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }

-static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
-static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }
+static inline struct processor * active_processor() __attribute__((const)) { return TL_GET( this_processor ); } // UNSAFE
+static inline struct cluster   * active_cluster  () __attribute__((const)) { return TL_GET( this_processor )->cltr; }

 // Local Variables: //
libcfa/src/concurrency/kernel_private.hfa (rb0c7419 → r8c50aed)

@@ -31,5 +31,5 @@
 }

-void ScheduleThread( thread_desc * ) __attribute__((nonnull (1)));
+void __schedule_thread( thread_desc * ) __attribute__((nonnull (1)));

 //Block current thread and release/wake-up the following resources
@@ -40,5 +40,5 @@
 void main(processorCtx_t *);

-void * create_pthread( pthread_t *, void * (*)(void *), void * );
+void * __create_pthread( pthread_t *, void * (*)(void *), void * );

 static inline void wake_fast(processor * this) {
@@ -85,5 +85,5 @@
 #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]

-static inline uint32_t tls_rand() {
+static inline uint32_t __tls_rand() {
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
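The renamed __tls_rand is a small xorshift-style generator over the per-processor seed in kernelTLS: each call scrambles the seed with three shift/xor steps and returns it, with no locking needed because the state is thread-local. A sketch of the same idea in plain C, using Marsaglia's classic 32-bit shift triple (13, 17, 5) rather than the kernel's own constants (the diff shows << 6 and >> 21, with the third step cut off):

#include <stdint.h>

static _Thread_local uint32_t rand_seed = 2463534242u;   /* any nonzero per-thread seed */

static inline uint32_t tls_rand_sketch( void ) {
	rand_seed ^= rand_seed << 13;   /* three shift/xor steps give a full-period xorshift generator */
	rand_seed ^= rand_seed >> 17;
	rand_seed ^= rand_seed << 5;
	return rand_seed;
}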
libcfa/src/concurrency/preemption.cfa (rb0c7419 → r8c50aed)

@@ -306,4 +306,4 @@
	signal_block( SIGALRM );

-	alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
+	alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p );
libcfa/src/concurrency/thread.cfa (rb0c7419 → r8c50aed)

@@ -22,11 +22,4 @@
 #define __CFA_INVOKE_PRIVATE__
 #include "invoke.h"
-
-extern "C" {
-	#include <fenv.h>
-	#include <stddef.h>
-}
-
-//extern volatile thread_local processor * this_processor;

 //-----------------------------------------------------------------------------
@@ -56,4 +49,22 @@
 }

+//-----------------------------------------------------------------------------
+// Starting and stopping threads
+forall( dtype T | is_thread(T) )
+void __thrd_start( T & this, void (*main_p)(T &) ) {
+	thread_desc * this_thrd = get_thread(this);
+
+	disable_interrupts();
+	CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
+
+	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
+	verify( this_thrd->context.SP );
+
+	__schedule_thread(this_thrd);
+	enable_interrupts( __cfaabi_dbg_ctx );
+}
+
+//-----------------------------------------------------------------------------
+// Support for threads that don't ues the thread keyword
 forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
 void ?{}( scoped(T)& this ) with( this ) {
@@ -73,20 +84,4 @@
 }

-//-----------------------------------------------------------------------------
-// Starting and stopping threads
-forall( dtype T | is_thread(T) )
-void __thrd_start( T & this, void (*main_p)(T &) ) {
-	thread_desc * this_thrd = get_thread(this);
-
-	disable_interrupts();
-	CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
-
-	this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
-	verify( this_thrd->context.SP );
-
-	ScheduleThread(this_thrd);
-	enable_interrupts( __cfaabi_dbg_ctx );
-}
-
 // Local Variables: //
 // mode: c //
libcfa/src/concurrency/thread.hfa (rb0c7419 → r8c50aed)

@@ -31,23 +31,18 @@
 };

-#define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
+// define that satisfies the trait without using the thread keyword
+#define DECL_THREAD(X) thread_desc* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)

+// Inline getters for threads/coroutines/monitors
 forall( dtype T | is_thread(T) )
-static inline coroutine_desc* get_coroutine(T & this) {
-	return &get_thread(this)->self_cor;
-}
+static inline coroutine_desc* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }

 forall( dtype T | is_thread(T) )
-static inline monitor_desc* get_monitor(T & this) {
-	return &get_thread(this)->self_mon;
-}
+static inline monitor_desc  * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }

-static inline coroutine_desc* get_coroutine(thread_desc * this) {
-	return &this->self_cor;
-}
-
-static inline monitor_desc* get_monitor(thread_desc * this) {
-	return &this->self_mon;
-}
-
+static inline coroutine_desc* get_coroutine(thread_desc * this) __attribute__((const)) { return &this->self_cor; }
+static inline monitor_desc  * get_monitor (thread_desc * this) __attribute__((const)) { return &this->self_mon; }
+
+//-----------------------------------------------------------------------------
+// forward declarations needed for threads
 extern struct cluster * mainCluster;
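DECL_THREAD expands to a get_thread definition (now marked __attribute__((const))) plus a declaration of main, so a type can satisfy the thread trait without the thread keyword. A hedged sketch of how such a type might be wired up, assuming only what the macro body above shows; the struct and its fields are illustrative, not taken from the changeset:

struct worker {
	thread_desc __thrd;   // storage returned by the macro-generated get_thread
	int id;
};

DECL_THREAD(worker);      // defines get_thread(worker &) and declares main(worker &)

void main(worker & this) {
	// thread body; started through __thrd_start like any other thread
	this.id += 1;
}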