Ignore:
Timestamp:
Jun 27, 2018, 3:28:41 PM (8 years ago)
Author:
Aaron Moss <a3moss@…>
Branches:
new-env, with_gc
Children:
b21c77a
Parents:
0182bfa (diff), 63238a4 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' into with_gc

Location:
src/libcfa/concurrency
Files:
2 added
6 edited

Legend:

Unmodified
Added
Removed
  • src/libcfa/concurrency/invoke.h

    r0182bfa r28f3a19  
    1010// Created On       : Tue Jan 17 12:27:26 2016
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Fri Mar 30 22:33:59 2018
    13 // Update Count     : 30
     12// Last Modified On : Sat May 19 08:23:21 2018
     13// Update Count     : 31
    1414//
    1515
     
    1818#include "bits/locks.h"
    1919
    20 #define TL_GET( member ) kernelTLS.member
    21 #define TL_SET( member, value ) kernelTLS.member = value;
    22 
    2320#ifdef __cforall
    2421extern "C" {
     
    2825#ifndef _INVOKE_H_
    2926#define _INVOKE_H_
     27
     28#ifdef __ARM_ARCH
     29        // function prototypes are only really used by these macros on ARM
     30        void disable_global_interrupts();
     31        void enable_global_interrupts();
     32
     33        #define TL_GET( member ) ( { __typeof__( kernelTLS.member ) target; \
     34                disable_global_interrupts(); \
     35                target = kernelTLS.member; \
     36                enable_global_interrupts(); \
     37                target; } )
     38        #define TL_SET( member, value ) disable_global_interrupts(); \
     39                kernelTLS.member = value; \
     40                enable_global_interrupts();
     41#else
     42        #define TL_GET( member ) kernelTLS.member
     43        #define TL_SET( member, value ) kernelTLS.member = value;
     44#endif
    3045
    3146        #ifdef __cforall
  • src/libcfa/concurrency/kernel

    r0182bfa r28f3a19  
    2323extern "C" {
    2424#include <pthread.h>
     25#include <semaphore.h>
    2526}
    2627
     
    4344extern struct cluster * mainCluster;
    4445
    45 enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule };
     46enum FinishOpCode { No_Action, Release, Schedule, Release_Schedule, Release_Multi, Release_Multi_Schedule, Callback };
     47
     48typedef void (*__finish_callback_fptr_t)(void);
    4649
    4750//TODO use union, many of these fields are mutually exclusive (i.e. MULTI vs NOMULTI)
    4851struct FinishAction {
    4952        FinishOpCode action_code;
     53        /*
     54        // Union of possible actions
     55        union {
     56                // Option 1 : locks and threads
     57                struct {
     58                        // 1 thread or N thread
     59                        union {
     60                                thread_desc * thrd;
     61                                struct {
     62                                        thread_desc ** thrds;
     63                                        unsigned short thrd_count;
     64                                };
     65                        };
     66                        // 1 lock or N lock
     67                        union {
     68                                __spinlock_t * lock;
     69                                struct {
     70                                        __spinlock_t ** locks;
     71                                        unsigned short lock_count;
     72                                };
     73                        };
     74                };
     75                // Option 2 : action pointer
     76                __finish_callback_fptr_t callback;
     77        };
     78        /*/
    5079        thread_desc * thrd;
     80        thread_desc ** thrds;
     81        unsigned short thrd_count;
    5182        __spinlock_t * lock;
    5283        __spinlock_t ** locks;
    5384        unsigned short lock_count;
    54         thread_desc ** thrds;
    55         unsigned short thrd_count;
     85        __finish_callback_fptr_t callback;
     86        //*/
    5687};
    5788static inline void ?{}(FinishAction & this) {
     
    82113        pthread_t kernel_thread;
    83114
     115        // RunThread data
     116        // Action to do after a thread is run
     117        struct FinishAction finish;
     118
     119        // Preemption data
     120        // Node which is added in the discrete event simulation
     121        struct alarm_node_t * preemption_alarm;
     122
     123        // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
     124        bool pending_preemption;
     125
     126        // Idle lock
     127        __bin_sem_t idleLock;
     128
    84129        // Termination
    85130        // Set to true to notify the processor should terminate
     
    89134        semaphore terminated;
    90135
    91         // RunThread data
    92         // Action to do after a thread is run
    93         struct FinishAction finish;
    94 
    95         // Preemption data
    96         // Node which is added in the discrete event simulation
    97         struct alarm_node_t * preemption_alarm;
    98 
    99         // If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
    100         bool pending_preemption;
    101 
    102         // Idle lock
    103 
    104136        // Link lists fields
    105         struct {
     137        struct __dbg_node_proc {
    106138                struct processor * next;
    107139                struct processor * prev;
     
    150182
    151183        // Link lists fields
    152         struct {
     184        struct __dbg_node_cltr {
    153185                cluster * next;
    154186                cluster * prev;
  • src/libcfa/concurrency/kernel.c

    r0182bfa r28f3a19  
    1616//C Includes
    1717#include <stddef.h>
     18#include <errno.h>
     19#include <string.h>
    1820extern "C" {
    1921#include <stdio.h>
     
    4951thread_desc * mainThread;
    5052
    51 struct { __dllist_t(cluster    ) list; __spinlock_t lock; } global_clusters;
     53extern "C" {
     54struct { __dllist_t(cluster) list; __spinlock_t lock; } __cfa_dbg_global_clusters;
     55}
    5256
    5357//-----------------------------------------------------------------------------
     
    143147        runner.proc = &this;
    144148
     149        idleLock{};
     150
    145151        start( &this );
    146152}
    147153
    148154void ^?{}(processor & this) with( this ){
    149         if( ! do_terminate ) {
     155        if( ! __atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) ) {
    150156                __cfaabi_dbg_print_safe("Kernel : core %p signaling termination\n", &this);
    151                 terminate(&this);
    152                 verify(this.do_terminate);
    153                 verify( kernelTLS.this_processor != &this);
     157
     158                __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED);
     159                wake( &this );
     160
    154161                P( terminated );
    155162                verify( kernelTLS.this_processor != &this);
    156                 pthread_join( kernel_thread, NULL );
    157         }
     163        }
     164
     165        pthread_join( kernel_thread, NULL );
    158166}
    159167
     
    194202
    195203                thread_desc * readyThread = NULL;
    196                 for( unsigned int spin_count = 0; ! this->do_terminate; spin_count++ )
     204                for( unsigned int spin_count = 0; ! __atomic_load_n(&this->do_terminate, __ATOMIC_SEQ_CST); spin_count++ )
    197205                {
    198206                        readyThread = nextThread( this->cltr );
     
    213221                        else
    214222                        {
    215                                 spin(this, &spin_count);
     223                                // spin(this, &spin_count);
     224                                halt(this);
    216225                        }
    217226                }
     
    257266// its final actions must be executed from the kernel
    258267void finishRunning(processor * this) with( this->finish ) {
    259         if( action_code == Release ) {
    260                 verify( ! kernelTLS.preemption_state.enabled );
     268        verify( ! kernelTLS.preemption_state.enabled );
     269        choose( action_code ) {
     270        case No_Action:
     271                break;
     272        case Release:
    261273                unlock( *lock );
    262         }
    263         else if( action_code == Schedule ) {
     274        case Schedule:
    264275                ScheduleThread( thrd );
    265         }
    266         else if( action_code == Release_Schedule ) {
    267                 verify( ! kernelTLS.preemption_state.enabled );
     276        case Release_Schedule:
    268277                unlock( *lock );
    269278                ScheduleThread( thrd );
    270         }
    271         else if( action_code == Release_Multi ) {
    272                 verify( ! kernelTLS.preemption_state.enabled );
     279        case Release_Multi:
    273280                for(int i = 0; i < lock_count; i++) {
    274281                        unlock( *locks[i] );
    275282                }
    276         }
    277         else if( action_code == Release_Multi_Schedule ) {
     283        case Release_Multi_Schedule:
    278284                for(int i = 0; i < lock_count; i++) {
    279285                        unlock( *locks[i] );
     
    282288                        ScheduleThread( thrds[i] );
    283289                }
    284         }
    285         else {
    286                 assert(action_code == No_Action);
    287         }
    288 }
    289 
    290 // Handles spinning logic
    291 // TODO : find some strategy to put cores to sleep after some time
    292 void spin(processor * this, unsigned int * spin_count) {
    293         (*spin_count)++;
     290        case Callback:
     291                callback();
     292        default:
     293                abort("KERNEL ERROR: Unexpected action to run after thread");
     294        }
    294295}
    295296
     
    396397        with( *thrd->curr_cluster ) {
    397398                lock  ( ready_queue_lock __cfaabi_dbg_ctx2 );
     399                bool was_empty = !(ready_queue != 0);
    398400                append( ready_queue, thrd );
    399401                unlock( ready_queue_lock );
     402
     403                if(was_empty) {
     404                        lock      (proc_list_lock __cfaabi_dbg_ctx2);
     405                        if(idles) {
     406                                wake_fast(idles.head);
     407                        }
     408                        unlock    (proc_list_lock);
     409                }
     410                else if( struct processor * idle = idles.head ) {
     411                        wake_fast(idle);
     412                }
     413
    400414        }
    401415
     
    497511}
    498512
     513void BlockInternal(__finish_callback_fptr_t callback) {
     514        disable_interrupts();
     515        with( *kernelTLS.this_processor ) {
     516                finish.action_code = Callback;
     517                finish.callback    = callback;
     518        }
     519
     520        verify( ! kernelTLS.preemption_state.enabled );
     521        returnToKernel();
     522        verify( ! kernelTLS.preemption_state.enabled );
     523
     524        enable_interrupts( __cfaabi_dbg_ctx );
     525}
     526
    499527// KERNEL ONLY
    500528void LeaveThread(__spinlock_t * lock, thread_desc * thrd) {
     
    518546        __cfaabi_dbg_print_safe("Kernel : Starting\n");
    519547
    520         global_clusters.list{ __get };
    521         global_clusters.lock{};
     548        __cfa_dbg_global_clusters.list{ __get };
     549        __cfa_dbg_global_clusters.lock{};
    522550
    523551        // Initialize the main cluster
     
    600628        // When its coroutine terminates, it return control to the mainThread
    601629        // which is currently here
    602         mainProcessor->do_terminate = true;
     630        __atomic_store_n(&mainProcessor->do_terminate, true, __ATOMIC_RELEASE);
    603631        returnToKernel();
     632        mainThread->self_cor.state = Halted;
    604633
    605634        // THE SYSTEM IS NOW COMPLETELY STOPPED
     
    617646        ^(mainThread){};
    618647
    619         ^(global_clusters.list){};
    620         ^(global_clusters.lock){};
     648        ^(__cfa_dbg_global_clusters.list){};
     649        ^(__cfa_dbg_global_clusters.lock){};
    621650
    622651        __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
     
    627656//=============================================================================================
    628657
    629 // void halt(processor * this) with( this ) {
    630 //      pthread_mutex_lock( &idle.lock );
    631 
    632 
    633 
    634 //      // SKULLDUGGERY: Even if spurious wake-up is a thing
    635 //      // spuriously waking up a kernel thread is not a big deal
    636 //      // if it is very rare.
    637 //      pthread_cond_wait( &idle.cond, &idle.lock);
    638 //      pthread_mutex_unlock( &idle.lock );
    639 // }
    640 
    641 // void wake(processor * this) with( this ) {
    642 //      pthread_mutex_lock  (&idle.lock);
    643 //      pthread_cond_signal (&idle.cond);
    644 //      pthread_mutex_unlock(&idle.lock);
    645 // }
     658void halt(processor * this) with( *this ) {
     659        // verify( ! __atomic_load_n(&do_terminate, __ATOMIC_SEQ_CST) );
     660
     661        with( *cltr ) {
     662                lock      (proc_list_lock __cfaabi_dbg_ctx2);
     663                remove    (procs, *this);
     664                push_front(idles, *this);
     665                unlock    (proc_list_lock);
     666        }
     667
     668        __cfaabi_dbg_print_safe("Kernel : Processor %p ready to sleep\n", this);
     669
     670        wait( idleLock );
     671
     672        __cfaabi_dbg_print_safe("Kernel : Processor %p woke up and ready to run\n", this);
     673
     674        with( *cltr ) {
     675                lock      (proc_list_lock __cfaabi_dbg_ctx2);
     676                remove    (idles, *this);
     677                push_front(procs, *this);
     678                unlock    (proc_list_lock);
     679        }
     680}
    646681
    647682//=============================================================================================
     
    758793// Global Queues
    759794void doregister( cluster     & cltr ) {
    760         lock      ( global_clusters.lock __cfaabi_dbg_ctx2);
    761         push_front( global_clusters.list, cltr );
    762         unlock    ( global_clusters.lock );
     795        lock      ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
     796        push_front( __cfa_dbg_global_clusters.list, cltr );
     797        unlock    ( __cfa_dbg_global_clusters.lock );
    763798}
    764799
    765800void unregister( cluster     & cltr ) {
    766         lock  ( global_clusters.lock __cfaabi_dbg_ctx2);
    767         remove( global_clusters.list, cltr );
    768         unlock( global_clusters.lock );
     801        lock  ( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2);
     802        remove( __cfa_dbg_global_clusters.list, cltr );
     803        unlock( __cfa_dbg_global_clusters.lock );
    769804}
    770805
  • src/libcfa/concurrency/kernel_private.h

    r0182bfa r28f3a19  
    4848void BlockInternal(__spinlock_t * locks [], unsigned short count);
    4949void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
     50void BlockInternal(__finish_callback_fptr_t callback);
    5051void LeaveThread(__spinlock_t * lock, thread_desc * thrd);
    5152
     
    5657void runThread(processor * this, thread_desc * dst);
    5758void finishRunning(processor * this);
    58 void terminate(processor * this);
    59 void spin(processor * this, unsigned int * spin_count);
     59void halt(processor * this);
     60
     61static inline void wake_fast(processor * this) {
     62        __cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);
     63        post( this->idleLock );
     64}
     65
     66static inline void wake(processor * this) {
     67        disable_interrupts();
     68        wake_fast(this);
     69        enable_interrupts( __cfaabi_dbg_ctx );
     70}
    6071
    6172struct event_kernel_t {
     
    6576
    6677extern event_kernel_t * event_kernel;
    67 
    68 //extern thread_local coroutine_desc * volatile this_coroutine;
    69 //extern thread_local thread_desc *    volatile this_thread;
    70 //extern thread_local processor *      volatile this_processor;
    71 
    72 // extern volatile thread_local bool preemption_in_progress;
    73 // extern volatile thread_local bool preemption_enabled;
    74 // extern volatile thread_local unsigned short disable_preempt_count;
    7578
    7679struct __cfa_kernel_preemption_state_t {
  • src/libcfa/concurrency/monitor.c

    r0182bfa r28f3a19  
    297297        this.count = count;
    298298
    299         // Sort monitors based on address -> TODO use a sort specialized for small numbers
     299        // Sort monitors based on address
    300300        __libcfa_small_sort(this.m, count);
    301301
  • src/libcfa/concurrency/preemption.c

    r0182bfa r28f3a19  
    1010// Created On       : Mon Jun 5 14:20:42 2017
    1111// Last Modified By : Peter A. Buhr
    12 // Last Modified On : Mon Apr  9 13:52:39 2018
    13 // Update Count     : 36
     12// Last Modified On : Tue Jun  5 17:35:49 2018
     13// Update Count     : 37
    1414//
    1515
     
    116116        // If there are still alarms pending, reset the timer
    117117        if( alarms->head ) {
    118                 __cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
     118                __cfaabi_dbg_print_buffer_decl( " KERNEL: @%ju(%ju) resetting alarm to %ju.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
    119119                Duration delta = alarms->head->alarm - currtime;
    120120                Duration caped = max(delta, 50`us);
     
    161161        void disable_interrupts() {
    162162                with( kernelTLS.preemption_state ) {
     163                        #if GCC_VERSION > 50000
    163164                        static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
     165                        #endif
    164166
    165167                        // Set enabled flag to false
     
    190192                        // Check if we need to prempt the thread because an interrupt was missed
    191193                        if( prev == 1 ) {
     194                                #if GCC_VERSION > 50000
    192195                                static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
     196                                #endif
    193197
    194198                                // Set enabled flag to true
     
    217221                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers, someone enabled interrupts that were already enabled
    218222                if( prev == 1 ) {
     223                        #if GCC_VERSION > 50000
    219224                        static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
     225                        #endif
    220226                        // Set enabled flag to true
    221227                        // should be atomic to avoid preemption in the middle of the operation.
     
    254260static void preempt( processor * this ) {
    255261        sigval_t value = { PREEMPT_NORMAL };
    256         pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
    257 }
    258 
    259 // kill wrapper : signal a processor
    260 void terminate(processor * this) {
    261         this->do_terminate = true;
    262         sigval_t value = { PREEMPT_TERMINATE };
    263262        pthread_sigqueue( this->kernel_thread, SIGUSR1, value );
    264263}
     
    362361        choose(sfp->si_value.sival_int) {
    363362                case PREEMPT_NORMAL   : ;// Normal case, nothing to do here
    364                 case PREEMPT_TERMINATE: verify( kernelTLS.this_processor->do_terminate);
     363                case PREEMPT_TERMINATE: verify( __atomic_load_n( &kernelTLS.this_processor->do_terminate, __ATOMIC_SEQ_CST ) );
    365364                default:
    366365                        abort( "internal error, signal value is %d", sfp->si_value.sival_int );
     
    376375
    377376        // Clear sighandler mask before context switching.
     377        #if GCC_VERSION > 50000
    378378        static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
     379        #endif
    379380        if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
    380381                abort( "internal error, sigprocmask" );
     
    479480}
    480481
     482#ifdef __CFA_WITH_VERIFY__
     483bool __cfaabi_dbg_in_kernel() {
     484        return !kernelTLS.preemption_state.enabled;
     485}
     486#endif
     487
    481488// Local Variables: //
    482489// mode: c //
Note: See TracChangeset for help on using the changeset viewer.