Changeset 025278e for src


Timestamp: Nov 2, 2017, 4:38:32 PM (7 years ago)
Author: Thierry Delisle <tdelisle@…>
Branches: ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, resolv-new, with_gc
Children: 513daec
Parents: 8fc45b7
Message: Changed comments to be above fields, to help source control diff readability
Location: src/libcfa/concurrency
Files: 3 edited

  • src/libcfa/concurrency/invoke.h

    r8fc45b7 r025278e  
    25 25  #define _INVOKE_H_
    26 26
    27       #define unlikely(x)    __builtin_expect(!!(x), 0)
    28       #define thread_local _Thread_local
    29 
    30       typedef void (*fptr_t)();
    31 
    32       struct spinlock {
    33             volatile int lock;
    34             #ifdef __CFA_DEBUG__
    35                   const char * prev_name;
    36                   void* prev_thrd;
    37             #endif
    38       };
    39 
    40       struct __thread_queue_t {
    41             struct thread_desc * head;
    42             struct thread_desc ** tail;
    43       };
    44 
    45       struct __condition_stack_t {
    46             struct __condition_criterion_t * top;
    47       };
    48 
    49       #ifdef __CFORALL__
    50       extern "Cforall" {
    51             void ?{}( struct __thread_queue_t & );
    52             void append( struct __thread_queue_t &, struct thread_desc * );
    53             struct thread_desc * pop_head( struct __thread_queue_t & );
    54             struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );
    55 
    56             void ?{}( struct __condition_stack_t & );
    57             void push( struct __condition_stack_t &, struct __condition_criterion_t * );
    58             struct __condition_criterion_t * pop( struct __condition_stack_t & );
    59 
    60             void  ?{}(spinlock & this);
    61             void ^?{}(spinlock & this);
    62       }
    63       #endif
    64 
    65       struct coStack_t {
    66             unsigned int size;                        // size of stack
    67             void *storage;                            // pointer to stack
    68             void *limit;                              // stack grows towards stack limit
    69             void *base;                               // base of stack
    70             void *context;                            // address of cfa_context_t
    71             void *top;                                // address of top of storage
    72             bool userStack;                           // whether or not the user allocated the stack
    73       };
    74 
    75       enum coroutine_state { Halted, Start, Inactive, Active, Primed };
    76 
    77       struct coroutine_desc {
    78             struct coStack_t stack;                   // stack information of the coroutine
    79             const char *name;                         // textual name for coroutine/task, initialized by uC++ generated code
    80             int errno_;                               // copy of global UNIX variable errno
    81             enum coroutine_state state;               // current execution status for coroutine
    82             struct coroutine_desc * starter;          // first coroutine to resume this one
    83             struct coroutine_desc * last;             // last coroutine to resume this one
    84       };
    85 
    86       struct __waitfor_mask_t {
    87             short * accepted;                         // the index of the accepted function, -1 if none
     88             struct __acceptable_t * clauses;          // list of acceptable functions, null if any function is acceptable
    89             short size;                               // number of acceptable functions
    90       };
    91 
    92       struct monitor_desc {
    93             struct spinlock lock;                     // spinlock to protect internal data
    94             struct thread_desc * owner;               // current owner of the monitor
    95             struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
    96             struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
     97             unsigned int recursion;                   // monitor routines can be called recursively; we need to keep track of that
    98             struct __waitfor_mask_t mask;             // mask used to know if some thread is waiting for something while holding the monitor
    99             struct __condition_node_t * dtor_node;    // node used to signal the dtor in a waitfor dtor
    100       };
    101 
    102       struct __monitor_group_t {
    103             struct monitor_desc ** list;              // currently held monitors
    104             short                  size;              // number of currently held monitors
    105             fptr_t                 func;              // last function that acquired monitors
    106       };
    107 
    108       struct thread_desc {
    109             // Core threading fields
    110             struct coroutine_desc  self_cor;          // coroutine body used to store context
    111             struct monitor_desc    self_mon;          // monitor body used for mutual exclusion
    112             struct monitor_desc *  self_mon_p;        // pointer to monitor with sufficient lifetime for current monitors
    113             struct __monitor_group_t monitors;        // monitors currently held by this thread
    114 
     115             // Linked-list fields
     116             struct thread_desc * next;                // intrusive link field for threads
    117 
    118 
     27        #define unlikely(x)    __builtin_expect(!!(x), 0)
     28        #define thread_local _Thread_local
     29
     30        typedef void (*fptr_t)();
     31
     32        struct spinlock {
     33                volatile int lock;
     34                #ifdef __CFA_DEBUG__
     35                        const char * prev_name;
     36                        void* prev_thrd;
     37                #endif
     38        };
     39
     40        struct __thread_queue_t {
     41                struct thread_desc * head;
     42                struct thread_desc ** tail;
     43        };
     44
     45        struct __condition_stack_t {
     46                struct __condition_criterion_t * top;
     47        };
     48
     49        #ifdef __CFORALL__
     50        extern "Cforall" {
     51                void ?{}( struct __thread_queue_t & );
     52                void append( struct __thread_queue_t &, struct thread_desc * );
     53                struct thread_desc * pop_head( struct __thread_queue_t & );
     54                struct thread_desc * remove( struct __thread_queue_t &, struct thread_desc ** );
     55
     56                void ?{}( struct __condition_stack_t & );
     57                void push( struct __condition_stack_t &, struct __condition_criterion_t * );
     58                struct __condition_criterion_t * pop( struct __condition_stack_t & );
     59
     60                void  ?{}(spinlock & this);
     61                void ^?{}(spinlock & this);
     62        }
     63        #endif
     64
     65        struct coStack_t {
     66                // size of stack
     67                unsigned int size;
     68
     69                // pointer to stack
     70                void *storage;
     71
     72                // stack grows towards stack limit
     73                void *limit;
     74
     75                // base of stack
     76                void *base;
     77
     78                // address of cfa_context_t
     79                void *context;
     80
     81                // address of top of storage
     82                void *top;
     83
     84                // whether or not the user allocated the stack
     85                bool userStack;
     86
     87        };
     88
     89        enum coroutine_state { Halted, Start, Inactive, Active, Primed };
     90
     91        struct coroutine_desc {
     92                // stack information of the coroutine
     93                struct coStack_t stack;
     94
     95                // textual name for coroutine/task, initialized by uC++ generated code
     96                const char *name;
     97
     98                // copy of global UNIX variable errno
     99                int errno_;
     100
     101                // current execution status for coroutine
     102                enum coroutine_state state;
     103
     104                // first coroutine to resume this one
     105                struct coroutine_desc * starter;
     106
     107                // last coroutine to resume this one
     108                struct coroutine_desc * last;
     109        };
     110
     111        struct __waitfor_mask_t {
     112                // the index of the accepted function, -1 if none
     113                short * accepted;
     114
      115                // list of acceptable functions, null if any function is acceptable
     116                struct __acceptable_t * clauses;
     117
     118                // number of acceptable functions
     119                short size;
     120        };
     121
     122        struct monitor_desc {
     123                // spinlock to protect internal data
     124                struct spinlock lock;
     125
     126                // current owner of the monitor
     127                struct thread_desc * owner;
     128
     129                // queue of threads that are blocked waiting for the monitor
     130                struct __thread_queue_t entry_queue;
     131
     132                // stack of conditions to run next once we exit the monitor
     133                struct __condition_stack_t signal_stack;
     134
      135                // monitor routines can be called recursively; we need to keep track of that
     136                unsigned int recursion;
     137
     138                // mask used to know if some thread is waiting for something while holding the monitor
     139                struct __waitfor_mask_t mask;
     140
     141                // node used to signal the dtor in a waitfor dtor
     142                struct __condition_node_t * dtor_node;
     143        };
     144
     145        struct __monitor_group_t {
     146                // currently held monitors
     147                struct monitor_desc ** list;
     148
     149                // number of currently held monitors
     150                short                  size;
     151
     152                // last function that acquired monitors
     153                fptr_t                 func;
     154        };
     155
     156        struct thread_desc {
     157                // Core threading fields
     158                // coroutine body used to store context
     159                struct coroutine_desc  self_cor;
     160
     161                // monitor body used for mutual exclusion
     162                struct monitor_desc    self_mon;
     163
     164                // pointer to monitor with sufficient lifetime for current monitors
     165                struct monitor_desc *  self_mon_p;
     166
     167                // monitors currently held by this thread
     168                struct __monitor_group_t monitors;
     169
     170
      171                // Linked-list fields
      172                // intrusive link field for threads
     173                struct thread_desc * next;
    119 174     };
    120 175
    121 176     #ifdef __CFORALL__
    122 177     extern "Cforall" {
    123             static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
    124                   return this.list[index];
    125             }
    126 
    127             static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
    128                   if( (lhs.list != 0) != (rhs.list != 0) ) return false;
    129                   if( lhs.size != rhs.size ) return false;
    130                   if( lhs.func != rhs.func ) return false;
    131 
    132                   // Check that all the monitors match
    133                   for( int i = 0; i < lhs.size; i++ ) {
    134                         // If not a match, check next function
    135                         if( lhs[i] != rhs[i] ) return false;
    136                   }
    137 
    138                   return true;
    139             }
    140       }
    141       #endif
     178                static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
     179                        return this.list[index];
     180                }
     181
     182                static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
     183                        if( (lhs.list != 0) != (rhs.list != 0) ) return false;
     184                        if( lhs.size != rhs.size ) return false;
     185                        if( lhs.func != rhs.func ) return false;
     186
     187                        // Check that all the monitors match
     188                        for( int i = 0; i < lhs.size; i++ ) {
     189                                // If not a match, check next function
     190                                if( lhs[i] != rhs[i] ) return false;
     191                        }
     192
     193                        return true;
     194                }
     195        }
     196        #endif
    142 197
    143 198 #endif //_INVOKE_H_
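
The __thread_queue_t declared above keeps a head pointer plus a pointer to the last link's next field, so append and pop_head are O(1) with no dummy node. A minimal plain-C sketch of that technique follows (not the Cforall implementation; queue_init is a hypothetical stand-in for the ?{} constructor, and remove is omitted):

    #include <stddef.h>

    struct thread_desc { struct thread_desc * next; };

    struct __thread_queue_t {
          struct thread_desc *  head;
          struct thread_desc ** tail;
    };

    static void queue_init( struct __thread_queue_t * this ) {
          this->head = NULL;
          this->tail = &this->head;                 // empty: tail points at head itself
    }

    static void append( struct __thread_queue_t * this, struct thread_desc * t ) {
          t->next     = NULL;
          *this->tail = t;                          // link after the current last node
          this->tail  = &t->next;                   // tail now aims at the new last link
    }

    static struct thread_desc * pop_head( struct __thread_queue_t * this ) {
          struct thread_desc * t = this->head;
          if( t ) {
                this->head = t->next;
                if( !this->head ) this->tail = &this->head;   // queue drained, reset tail
          }
          return t;
    }

Because tail always points at the link that must receive the next node, append never needs to branch on an empty queue.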
     
    146 201 #define _INVOKE_PRIVATE_H_
    147 202
    148       struct machine_context_t {
    149             void *SP;
    150             void *FP;
    151             void *PC;
    152       };
    153 
     154       // assembler routine that performs the context switch
    155       extern void CtxInvokeStub( void );
    156       void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
    157 
    158       #if   defined( __x86_64__ )
    159       #define CtxGet( ctx ) __asm__ ( \
    160                   "movq %%rsp,%0\n"   \
    161                   "movq %%rbp,%1\n"   \
    162             : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    163       #elif defined( __i386__ )
    164       #define CtxGet( ctx ) __asm__ ( \
    165                   "movl %%esp,%0\n"   \
    166                   "movl %%ebp,%1\n"   \
    167             : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    168       #endif
     203        struct machine_context_t {
     204                void *SP;
     205                void *FP;
     206                void *PC;
     207        };
     208
      209        // assembler routine that performs the context switch
     210        extern void CtxInvokeStub( void );
     211        void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");
     212
     213        #if   defined( __x86_64__ )
     214        #define CtxGet( ctx ) __asm__ ( \
     215                        "movq %%rsp,%0\n"   \
     216                        "movq %%rbp,%1\n"   \
     217                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     218        #elif defined( __i386__ )
     219        #define CtxGet( ctx ) __asm__ ( \
     220                        "movl %%esp,%0\n"   \
     221                        "movl %%ebp,%1\n"   \
     222                : "=rm" (ctx.SP), "=rm" (ctx.FP) )
     223        #endif
    169 224
    170 225 #endif //_INVOKE_PRIVATE_H_
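
CtxGet captures the executing thread's stack and frame pointers straight from the registers. A minimal sketch of how the x86-64 variant can be exercised (plain C; main and printf are illustrative only, while the macro body is copied from the header above):

    #include <stdio.h>

    struct machine_context_t {
          void *SP;
          void *FP;
          void *PC;
    };

    #if defined( __x86_64__ )
    #define CtxGet( ctx ) __asm__ ( \
                "movq %%rsp,%0\n"   \
                "movq %%rbp,%1\n"   \
          : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    #endif

    int main( void ) {
          struct machine_context_t ctx = { 0, 0, 0 };
          CtxGet( ctx );                            // snapshot rsp/rbp of this frame
          printf( "SP=%p FP=%p\n", ctx.SP, ctx.FP );
          return 0;
    }

Note that CtxGet fills only SP and FP; the PC field of machine_context_t is left untouched by this macro.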
  • src/libcfa/concurrency/kernel

    r8fc45b7 r025278e  
    26 26 //-----------------------------------------------------------------------------
    27 27 // Locks
    28 void lock      ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, spin if already acquired
    29 void lock_yield( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, yield repeatedly if already acquired
    30 bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );       // Lock the spinlock, return false if already acquired
    31 void unlock    ( spinlock * );                        // Unlock the spinlock
     28// Lock the spinlock, spin if already acquired
     29void lock      ( spinlock * DEBUG_CTX_PARAM2 );
     30
     31// Lock the spinlock, yield repeatedly if already acquired
     32void lock_yield( spinlock * DEBUG_CTX_PARAM2 );
     33
     34// Lock the spinlock, return false if already acquired
     35bool try_lock  ( spinlock * DEBUG_CTX_PARAM2 );
     36
     37// Unlock the spinlock
     38void unlock    ( spinlock * );
    32 39
    33 40 struct semaphore {
     
    46 53 // Cluster
    47 54 struct cluster {
     48         spinlock ready_queue_lock;                      // Ready queue lock
    49         __thread_queue_t ready_queue;                   // Ready queue for threads
    50         unsigned long long int preemption;              // Preemption rate on this cluster
      55        // Ready queue lock
     56        spinlock ready_queue_lock;
     57
     58        // Ready queue for threads
     59        __thread_queue_t ready_queue;
     60
     61        // Preemption rate on this cluster
     62        unsigned long long int preemption;
    51 63 };
    52 64
     
    79 91 struct processor {
    80 92         // Main state
     81         struct processorCtx_t * runner;                 // Coroutine ctx that keeps the state of the processor
    82         cluster * cltr;                                 // Cluster from which to get threads
    83         pthread_t kernel_thread;                        // Handle to pthreads
      93        // Coroutine ctx that keeps the state of the processor
     94        struct processorCtx_t * runner;
     95
     96        // Cluster from which to get threads
     97        cluster * cltr;
     98
     99        // Handle to pthreads
     100        pthread_t kernel_thread;
    84 101
    85 102        // Termination
     86         volatile bool do_terminate;                     // Set to true to notify that the processor should terminate
    87         semaphore terminated;                           // Termination synchronisation
      103        // Set to true to notify that the processor should terminate
     104        volatile bool do_terminate;
     105
     106        // Termination synchronisation
     107        semaphore terminated;
    88 108
    89 109        // RunThread data
     90         struct FinishAction finish;                     // Action to take after a thread is run
      110        // Action to take after a thread is run
     111        struct FinishAction finish;
    91 112
    92 113        // Preemption data
     93         struct alarm_node_t * preemption_alarm;         // Node which is added in the discrete event simulation
     94         bool pending_preemption;                        // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
      114        // Node which is added in the discrete event simulation
      115        struct alarm_node_t * preemption_alarm;
      116
      117        // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
     118        bool pending_preemption;
    95 119
    96 120 #ifdef __CFA_DEBUG__
    97         char * last_enable;                             // Last function to enable preemption on this processor
     121        // Last function to enable preemption on this processor
     122        char * last_enable;
    98 123 #endif
    99 124 };
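
The lock / lock_yield / try_lock split earlier in this file separates busy-wait acquisition from yielding while contended. A minimal plain-C sketch of the busy-wait flavour over the volatile int field (an illustration, not the library's implementation; the DEBUG_CTX_PARAM2 debug parameter is elided and the GCC __sync builtins are an assumed primitive):

    struct spinlock { volatile int lock; };

    // Returns nonzero only if this call flipped the lock from 0 to 1.
    static inline int try_lock( struct spinlock * this ) {
          return __sync_lock_test_and_set( &this->lock, 1 ) == 0;
    }

    // Lock the spinlock, spinning if already acquired.
    static inline void lock( struct spinlock * this ) {
          while( __sync_lock_test_and_set( &this->lock, 1 ) ) {
                while( this->lock ) {}              // read-only spin limits cache traffic
          }
    }

    // Unlock the spinlock.
    static inline void unlock( struct spinlock * this ) {
          __sync_lock_release( &this->lock );       // store 0 with release semantics
    }

lock_yield would replace the inner read-only spin with a call into the scheduler's yield, trading latency for fairness under contention.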
  • src/libcfa/concurrency/monitor

    r8fc45b7 r025278e  
    39 39 }
    40 40
    41 // static inline int ?<?(monitor_desc* lhs, monitor_desc* rhs) {
    42 //      return ((intptr_t)lhs) < ((intptr_t)rhs);
    43 // }
    44 
    45 41 struct monitor_guard_t {
    46 42         monitor_desc ** m;
     
    74 70
    75 71 struct __condition_criterion_t {
    76         bool ready;                                             //Whether or not the criterion is met (True if met)
    77         monitor_desc * target;                          //The monitor this criterion concerns
    78         struct __condition_node_t * owner;              //The parent node to which this criterion belongs
    79         __condition_criterion_t * next;         //Intrusive linked list Next field
     72        // Whether or not the criterion is met (True if met)
     73        bool ready;
     74
     75        // The monitor this criterion concerns
     76        monitor_desc * target;
     77
     78        // The parent node to which this criterion belongs
     79        struct __condition_node_t * owner;
     80
     81        // Intrusive linked list Next field
     82        __condition_criterion_t * next;
     83
    8084};
    81 85
    82 86 struct __condition_node_t {
    83         thread_desc * waiting_thread;                   //Thread that needs to be woken when all criteria are met
     84         __condition_criterion_t * criteria;     //Array of criteria (Criteria are contiguous in memory)
     85         unsigned short count;                           //Number of criteria in the array
    86         __condition_node_t * next;                      //Intrusive linked list Next field
    87         uintptr_t user_info;                            //Custom user info accessible before signalling
     87        // Thread that needs to be woken when all criteria are met
     88        thread_desc * waiting_thread;
     89
      90        // Array of criteria (Criteria are contiguous in memory)
      91        __condition_criterion_t * criteria;
      92
      93        // Number of criteria in the array
     94        unsigned short count;
     95
     96        // Intrusive linked list Next field
     97        __condition_node_t * next;
     98
     99        // Custom user info accessible before signalling
     100        uintptr_t user_info;
     101
    88 102 };
    89 103
     
    102 116
    103 117 struct condition {
    104         __condition_blocked_queue_t blocked;    //Linked list which contains the blocked threads as well as the information needed to unblock them
    105         monitor_desc ** monitors;                       //Array of monitor pointers (Monitors are NOT contiguous in memory)
    106         unsigned short monitor_count;                   //Number of monitors in the array
      118        // Linked list which contains the blocked threads as well as the information needed to unblock them
     119        __condition_blocked_queue_t blocked;
     120
     121        // Array of monitor pointers (Monitors are NOT contiguous in memory)
     122        monitor_desc ** monitors;
     123
     124        // Number of monitors in the array
     125        unsigned short monitor_count;
     126
    107 127 };
    108 128
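
Taken together, the structures above mean a condition fans out across several monitors: each blocked thread owns one __condition_node_t whose contiguous criteria array carries one __condition_criterion_t per monitor, and the thread may only be woken once every criterion is ready. A small plain-C sketch of that readiness test (all_criteria_met is a hypothetical helper, not library code):

    #include <stdbool.h>

    struct __condition_criterion_t { bool ready; };

    struct __condition_node_t {
          struct __condition_criterion_t * criteria;   // contiguous array, one entry per monitor
          unsigned short count;                        // number of criteria in the array
    };

    // The waiting thread becomes runnable only when every monitor it
    // blocked on has marked its criterion as met.
    static bool all_criteria_met( struct __condition_node_t * node ) {
          for( unsigned short i = 0; i < node->count; i += 1 ) {
                if( !node->criteria[i].ready ) return false;
          }
          return true;
    }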