Changeset 8c50aed for libcfa


Timestamp: Feb 21, 2020, 3:33:14 PM (5 years ago)
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children: a505021
Parents: b0c7419
Message: Some clean-up and renaming, also adding attribute((const/pure)) where relevant
Location: libcfa/src/concurrency
Files: 8 edited

Legend: unchanged context lines are unmarked; removed lines are prefixed with -, added lines with +; … marks elided lines between hunks.
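The commit message refers to GCC's const and pure function attributes. For context (this sketch is not part of the changeset, and the function names are hypothetical): a pure function may read global state but not write it, while a const function may not even read it; both let the compiler cache and merge calls.

    /* `const`: the result depends only on the argument values, so the
       compiler may fold repeated calls with the same arguments. */
    __attribute__((const)) static int square(int x) {
            return x * x;
    }

    extern int table[256];

    /* `pure`: may read global memory (here, `table`) but has no side
       effects; calls can still be merged between writes to that memory. */
    __attribute__((pure)) static int lookup(int i) {
            return table[i & 255];
    }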
  • libcfa/src/concurrency/coroutine.hfa

  void prime(T & cor);

- static inline struct coroutine_desc * active_coroutine() { return TL_GET( this_thread )->curr_cor; }
+ static inline struct coroutine_desc * active_coroutine() __attribute__((const)) { return TL_GET( this_thread )->curr_cor; }

  //-----------------------------------------------------------------------------
…
  // Private wrappers for context switch and stack creation
  // Wrapper for co
- static inline void CoroutineCtxSwitch(coroutine_desc* src, coroutine_desc* dst) {
+ static inline void CoroutineCtxSwitch( coroutine_desc * src, coroutine_desc * dst ) __attribute__((nonnull (1, 2))) {
          // set state of current coroutine to inactive
          src->state = src->state == Halted ? Halted : Inactive;
…
  }

- static inline void resume(coroutine_desc * dst) {
+ static inline void resume( coroutine_desc * dst ) __attribute__((nonnull (1))) {
          // optimization : read TLS once and reuse it
          // Safety note: this is preemption safe since if
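For reference, the __attribute__((nonnull (1, 2))) added to CoroutineCtxSwitch above promises that the listed (1-based) pointer arguments are never null: GCC warns under -Wnonnull when a null constant is passed and may drop null checks on those parameters. A small C sketch with a hypothetical function:

    #include <string.h>

    /* Hypothetical example: both pointer arguments are promised non-null,
       mirroring the attribute placed on CoroutineCtxSwitch and resume. */
    __attribute__((nonnull (1, 2)))
    static void copy_name(char * dst, const char * src) {
            strcpy(dst, src);   /* the compiler may assume dst and src are valid */
    }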
  • libcfa/src/concurrency/invoke.h

          #ifdef __cforall
          extern "Cforall" {
-                 static inline thread_desc *& get_next( thread_desc & this ) {
+                 static inline thread_desc *& get_next( thread_desc & this ) __attribute__((const)) {
                          return this.next;
                  }

-                 static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) {
+                 static inline [thread_desc *&, thread_desc *& ] __get( thread_desc & this ) /*__attribute__((const))*/ {
                          return this.node.[next, prev];
                  }
…
                  }

-                 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
+                 static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
                          if( (lhs.data != 0) != (rhs.data != 0) ) return false;
                          if( lhs.size != rhs.size ) return false;
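Note that the attribute on __get above is left commented out while get_next receives __attribute__((const)). The distinction matters for getters: returning the address of a field is a function of the argument value alone, so const is sound, whereas returning a value read through the argument can change between calls, so only pure is sound. A hedged C sketch with hypothetical names:

    struct node { struct node * next; };

    /* Pure address arithmetic on the argument: `const` fits, since the
       same n always yields the same location. */
    __attribute__((const)) static struct node ** next_field(struct node * n) {
            return &n->next;
    }

    /* Reads memory through the argument: only `pure` fits, since the
       result changes whenever n->next is reassigned. */
    __attribute__((pure)) static struct node * next_value(struct node * n) {
            return n->next;
    }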
  • libcfa/src/concurrency/kernel.cfa

  //-----------------------------------------------------------------------------
  //Start and stop routine for the kernel, declared first to make sure they run first
- static void kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
- static void kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));
+ static void __kernel_startup (void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) ));
+ static void __kernel_shutdown(void) __attribute__(( destructor ( STARTUP_PRIORITY_KERNEL ) ));

  //-----------------------------------------------------------------------------
…
  }

- static void start(processor * this);
+ static void * CtxInvokeProcessor(void * arg);
+
  void ?{}(processor & this, const char * name, cluster & cltr) with( this ) {
          this.name = name;
…
          idleLock{};

-         start( &this );
+         __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", &this);
+
+         this.stack = __create_pthread( &this.kernel_thread, CtxInvokeProcessor, (void *)&this );
+
+         __cfaabi_dbg_print_safe("Kernel : core %p started\n", &this);
  }

…
  // Kernel Scheduling logic
  //=============================================================================================
- static thread_desc * nextThread(cluster * this);
- static void runThread(processor * this, thread_desc * dst);
- static void halt(processor * this);
+ static thread_desc * __next_thread(cluster * this);
+ static void __run_thread(processor * this, thread_desc * dst);
+ static void __halt(processor * this);

  //Main of the processor contexts
…
                  thread_desc * readyThread = 0p;
                  for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ ) {
-                         readyThread = nextThread( this->cltr );
+                         readyThread = __next_thread( this->cltr );

                          if(readyThread) {
…
                                  /* paranoid */ verifyf( readyThread->next == 0p, "Expected null got %p", readyThread->next );

-                                 runThread(this, readyThread);
+                                 __run_thread(this, readyThread);

                                  /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
…
                          } else {
                                  // spin(this, &spin_count);
-                                 // halt(this);
+                                 __halt(this);
                          }
                  }
…
  // runThread runs a thread by context switching
  // from the processor coroutine to the target thread
- static void runThread(processor * this, thread_desc * thrd_dst) {
+ static void __run_thread(processor * this, thread_desc * thrd_dst) {
          coroutine_desc * proc_cor = get_coroutine(this->runner);

…
                  if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
                          // The thread was preempted, reschedule it and reset the flag
-                         ScheduleThread( thrd_dst );
+                         __schedule_thread( thrd_dst );
                          break RUNNING;
                  }
…
  } // Abort

- void * create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
+ void * __create_pthread( pthread_t * pthread, void * (*start)(void *), void * arg ) {
          pthread_attr_t attr;
…
  }

- static void start(processor * this) {
-         __cfaabi_dbg_print_safe("Kernel : Starting core %p\n", this);
-
-         this->stack = create_pthread( &this->kernel_thread, CtxInvokeProcessor, (void *)this );
-
-         __cfaabi_dbg_print_safe("Kernel : core %p started\n", this);
- }
-
  // KERNEL_ONLY
- void kernel_first_resume( processor * this ) {
+ static void __kernel_first_resume( processor * this ) {
          thread_desc * src = mainThread;
          coroutine_desc * dst = get_coroutine(this->runner);
…

  // KERNEL_ONLY
- void kernel_last_resume( processor * this ) {
+ static void __kernel_last_resume( processor * this ) {
          coroutine_desc * src = &mainThread->self_cor;
          coroutine_desc * dst = get_coroutine(this->runner);
…
  // Scheduler routines
  // KERNEL ONLY
- void ScheduleThread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
+ void __schedule_thread( thread_desc * thrd ) with( *thrd->curr_cluster ) {
          /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );
          /* paranoid */ #if defined( __CFA_WITH_VERIFY__ )
…

  // KERNEL ONLY
- static thread_desc * nextThread(cluster * this) with( *this ) {
+ static thread_desc * __next_thread(cluster * this) with( *this ) {
          /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

…
                          // Wake lost the race,
                          thrd->state = Inactive;
-                         ScheduleThread( thrd );
+                         __schedule_thread( thrd );
                          break;
                  case Rerun:
…
  //-----------------------------------------------------------------------------
  // Kernel boot procedures
- static void kernel_startup(void) {
+ static void __kernel_startup(void) {
          verify( ! kernelTLS.preemption_state.enabled );
          __cfaabi_dbg_print_safe("Kernel : Starting\n");
…
          // Add the main thread to the ready queue
          // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-         ScheduleThread(mainThread);
+         __schedule_thread(mainThread);

          // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX
          // context. Hence, the main thread does not begin through CtxInvokeThread, like all other threads. The trick here is that
          // mainThread is on the ready queue when this call is made.
-         kernel_first_resume( kernelTLS.this_processor );
+         __kernel_first_resume( kernelTLS.this_processor );

…
  }

- static void kernel_shutdown(void) {
+ static void __kernel_shutdown(void) {
          __cfaabi_dbg_print_safe("\n--------------------------------------------------\nKernel : Shutting down\n");
…
          // which is currently here
          __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
-         kernel_last_resume( kernelTLS.this_processor );
+         __kernel_last_resume( kernelTLS.this_processor );
          mainThread->self_cor.state = Halted;
…
  // Kernel Quiescing
  //=============================================================================================
- static void halt(processor * this) with( *this ) {
+ static void __halt(processor * this) with( *this ) {
          // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
…
  //-----------------------------------------------------------------------------
  // Debug
- bool threading_enabled(void) {
+ bool threading_enabled(void) __attribute__((const)) {
          return true;
  }
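The startup/shutdown declarations at the top of kernel.cfa rely on GCC's constructor/destructor attributes with an explicit priority, so the runtime kernel comes up before, and goes down after, any user-level constructors. A standalone C sketch (the names and the priority value are illustrative, not the STARTUP_PRIORITY_KERNEL value):

    #include <stdio.h>

    /* Lower numbers run earlier among constructors and later among
       destructors; priorities up to 100 are reserved for the implementation. */
    static void boot (void) __attribute__(( constructor(101) ));
    static void final(void) __attribute__(( destructor (101) ));

    static void boot (void) { puts("runtime up before main()"); }
    static void final(void) { puts("runtime down after main()"); }

    int main(void) {
            puts("main body");   /* prints between boot and final */
            return 0;
    }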
  • libcfa/src/concurrency/kernel.hfa

  static inline void  ?{}(processor & this, const char * name) { this{name, *mainCluster }; }

- static inline [processor *&, processor *& ] __get( processor & this ) {
-         return this.node.[next, prev];
- }
+ static inline [processor *&, processor *& ] __get( processor & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }

  //-----------------------------------------------------------------------------
…
  static inline void ?{} (cluster & this, const char * name)        { this{name, default_preemption()}; }

- static inline [cluster *&, cluster *& ] __get( cluster & this ) {
-         return this.node.[next, prev];
- }
+ static inline [cluster *&, cluster *& ] __get( cluster & this ) /*__attribute__((const))*/ { return this.node.[next, prev]; }

- static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
- static inline struct cluster   * active_cluster  () { return TL_GET( this_processor )->cltr; }
+ static inline struct processor * active_processor() __attribute__((const)) { return TL_GET( this_processor ); } // UNSAFE
+ static inline struct cluster   * active_cluster  () __attribute__((const)) { return TL_GET( this_processor )->cltr; }

  // Local Variables: //
  • libcfa/src/concurrency/kernel_private.hfa

  }

- void ScheduleThread( thread_desc * ) __attribute__((nonnull (1)));
+ void __schedule_thread( thread_desc * ) __attribute__((nonnull (1)));

  //Block current thread and release/wake-up the following resources
…
  void main(processorCtx_t *);

- void * create_pthread( pthread_t *, void * (*)(void *), void * );
+ void * __create_pthread( pthread_t *, void * (*)(void *), void * );

  static inline void wake_fast(processor * this) {
…
  #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]

- static inline uint32_t tls_rand() {
+ static inline uint32_t __tls_rand() {
          kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
          kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
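__tls_rand (only its first two steps appear in the excerpt) is a Marsaglia-style xorshift generator over a per-thread seed, which is why it needs no locking. The kernel's full shift constants are outside the excerpt; for reference, a standalone xorshift32 using Marsaglia's classic (13, 17, 5) triple:

    #include <stdint.h>

    /* Hypothetical standalone sketch; the kernel variant applies the same
       xor/shift pattern to kernelTLS.rand_seed. Seed must start nonzero. */
    static inline uint32_t xorshift32(uint32_t * seed) {
            uint32_t x = *seed;
            x ^= x << 13;
            x ^= x >> 17;
            x ^= x << 5;
            return *seed = x;
    }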
  • libcfa/src/concurrency/preemption.cfa

          signal_block( SIGALRM );

-         alarm_stack = create_pthread( &alarm_thread, alarm_loop, 0p );
+         alarm_stack = __create_pthread( &alarm_thread, alarm_loop, 0p );
  }
  • libcfa/src/concurrency/thread.cfa

  #define __CFA_INVOKE_PRIVATE__
  #include "invoke.h"
-
- extern "C" {
-         #include <fenv.h>
-         #include <stddef.h>
- }
-
- //extern volatile thread_local processor * this_processor;

  //-----------------------------------------------------------------------------
…
  }

+ //-----------------------------------------------------------------------------
+ // Starting and stopping threads
+ forall( dtype T | is_thread(T) )
+ void __thrd_start( T & this, void (*main_p)(T &) ) {
+         thread_desc * this_thrd = get_thread(this);
+
+         disable_interrupts();
+         CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
+
+         this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
+         verify( this_thrd->context.SP );
+
+         __schedule_thread(this_thrd);
+         enable_interrupts( __cfaabi_dbg_ctx );
+ }
+
+ //-----------------------------------------------------------------------------
+ // Support for threads that don't ues the thread keyword
  forall( dtype T | sized(T) | is_thread(T) | { void ?{}(T&); } )
  void ?{}( scoped(T)& this ) with( this ) {
…
  }

- //-----------------------------------------------------------------------------
- // Starting and stopping threads
- forall( dtype T | is_thread(T) )
- void __thrd_start( T & this, void (*main_p)(T &) ) {
-         thread_desc * this_thrd = get_thread(this);
-
-         disable_interrupts();
-         CtxStart(main_p, get_coroutine(this), this, CtxInvokeThread);
-
-         this_thrd->context.[SP, FP] = this_thrd->self_cor.context.[SP, FP];
-         verify( this_thrd->context.SP );
-
-         ScheduleThread(this_thrd);
-         enable_interrupts( __cfaabi_dbg_ctx );
- }
-
  // Local Variables: //
  // mode: c //
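The relocated __thrd_start above is the common start-up path for every thread: disable preemption, set up the new context with CtxStart, copy the coroutine's initial SP/FP into the thread descriptor, enqueue it with __schedule_thread, then re-enable interrupts. From user code this is all implicit in construction; a minimal Cforall usage sketch (assuming the standard thread API of this period):

    #include <fstream.hfa>
    #include <thread.hfa>

    thread Worker {};               // thread type; satisfies the is_thread trait

    void main(Worker & this) {      // body, runs once the thread is scheduled
            sout | "hello from a worker";
    }

    int main() {
            Worker w;               // construction starts the thread via __thrd_start
            return 0;               // w's destructor joins before main returns
    }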
  • libcfa/src/concurrency/thread.hfa

  };

- #define DECL_THREAD(X) thread_desc* get_thread(X& this) { return &this.__thrd; } void main(X& this)
+ // define that satisfies the trait without using the thread keyword
+ #define DECL_THREAD(X) thread_desc* get_thread(X& this) __attribute__((const)) { return &this.__thrd; } void main(X& this)
+
+ // Inline getters for threads/coroutines/monitors
+ forall( dtype T | is_thread(T) )
+ static inline coroutine_desc* get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }

  forall( dtype T | is_thread(T) )
- static inline coroutine_desc* get_coroutine(T & this) {
-         return &get_thread(this)->self_cor;
- }
+ static inline monitor_desc  * get_monitor  (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }

- forall( dtype T | is_thread(T) )
- static inline monitor_desc* get_monitor(T & this) {
-         return &get_thread(this)->self_mon;
- }
+ static inline coroutine_desc* get_coroutine(thread_desc * this) __attribute__((const)) { return &this->self_cor; }
+ static inline monitor_desc  * get_monitor  (thread_desc * this) __attribute__((const)) { return &this->self_mon; }

- static inline coroutine_desc* get_coroutine(thread_desc * this) {
-         return &this->self_cor;
- }
-
- static inline monitor_desc* get_monitor(thread_desc * this) {
-         return &this->self_mon;
- }
-
+ //-----------------------------------------------------------------------------
+ // forward declarations needed for threads
  extern struct cluster * mainCluster;
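The new comment documents DECL_THREAD's purpose: it generates the get_thread accessor (now marked const) and the forward declaration of main needed to satisfy the thread trait for a type declared without the thread keyword. A hedged usage sketch, assuming a struct carrying the __thrd field the macro body expects:

    struct MyTask {
            thread_desc __thrd;     // descriptor the macro's get_thread returns
            int id;                 // hypothetical user data
    };

    DECL_THREAD(MyTask);            // defines get_thread and declares main

    void main(MyTask & this) {
            // thread body; runs once a MyTask is started
    }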