  • libcfa/src/concurrency/kernel.hfa

    r454f478 r431cd4f  
    2828}
    2929
    30 //-----------------------------------------------------------------------------
    31 // Underlying Locks
    3230#ifdef __CFA_WITH_VERIFY__
    3331        extern bool __cfaabi_dbg_in_kernel();
    3432#endif
    3533
    36 extern "C" {
    37         char * strerror(int);
    38 }
    39 #define CHECKED(x) { int err = x; if( err != 0 ) abort("KERNEL ERROR: Operation \"" #x "\" return error %d - %s\n", err, strerror(err)); }
    40 
    41 struct __bin_sem_t {
    42         pthread_mutex_t         lock;
    43         pthread_cond_t          cond;
    44         int                     val;
    45 };
    46 
    47 static inline void ?{}(__bin_sem_t & this) with( this ) {
    48         // Create the mutex with error checking
    49         pthread_mutexattr_t mattr;
    50         pthread_mutexattr_init( &mattr );
    51         pthread_mutexattr_settype( &mattr, PTHREAD_MUTEX_ERRORCHECK_NP);
    52         pthread_mutex_init(&lock, &mattr);
    53 
    54         pthread_cond_init (&cond, (const pthread_condattr_t *)0p);  // workaround trac#208: cast should not be required
    55         val = 0;
    56 }
    57 
    58 static inline void ^?{}(__bin_sem_t & this) with( this ) {
    59         CHECKED( pthread_mutex_destroy(&lock) );
    60         CHECKED( pthread_cond_destroy (&cond) );
    61 }
    62 
    63 static inline void wait(__bin_sem_t & this) with( this ) {
    64         verify(__cfaabi_dbg_in_kernel());
    65         CHECKED( pthread_mutex_lock(&lock) );
    66                 while(val < 1) {
    67                         pthread_cond_wait(&cond, &lock);
    68                 }
    69                 val -= 1;
    70         CHECKED( pthread_mutex_unlock(&lock) );
    71 }
    72 
    73 static inline bool post(__bin_sem_t & this) with( this ) {
    74         bool needs_signal = false;
    75 
    76         CHECKED( pthread_mutex_lock(&lock) );
    77                 if(val < 1) {
    78                         val += 1;
    79                         pthread_cond_signal(&cond);
    80                         needs_signal = true;
    81                 }
    82         CHECKED( pthread_mutex_unlock(&lock) );
    83 
    84         return needs_signal;
    85 }
    86 
    87 #undef CHECKED
    88 
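
The block removed above is the kernel's pthread-based binary semaphore, which the __bin_sem_t idle field (replaced by a plain int further down in this changeset) used for idle sleep. As a reference for the pattern being retired, here is a minimal, self-contained C sketch of the same wait/post protocol together with a tiny usage example. It is illustrative only and not the CFA runtime's code; the real code additionally wraps each pthread call in the CHECKED error-reporting macro shown above.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Same wait/post pattern as the removed __bin_sem_t, plus a tiny usage
     * example: an "idle processor" thread parks on the semaphore and the
     * main thread wakes it.  Illustrative sketch only. */
    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             val;
    } bin_sem_t;

    static void bin_sem_wait(bin_sem_t * s) {
        pthread_mutex_lock(&s->lock);
        while (s->val < 1) pthread_cond_wait(&s->cond, &s->lock);
        s->val -= 1;                             /* consume the post */
        pthread_mutex_unlock(&s->lock);
    }

    static void bin_sem_post(bin_sem_t * s) {
        pthread_mutex_lock(&s->lock);
        if (s->val < 1) {                        /* binary: saturates at 1 */
            s->val += 1;
            pthread_cond_signal(&s->cond);
        }
        pthread_mutex_unlock(&s->lock);
    }

    static bin_sem_t idle = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };

    static void * processor_main(void * arg) {
        (void)arg;
        printf("processor: no work, parking\n");
        bin_sem_wait(&idle);                     /* block until another thread posts */
        printf("processor: woken up\n");
        return NULL;
    }

    int main(void) {
        pthread_t kernel_thread;
        pthread_create(&kernel_thread, NULL, processor_main, NULL);
        sleep(1);                                /* give the processor time to park */
        bin_sem_post(&idle);                     /* wake the idle processor */
        pthread_join(kernel_thread, NULL);
        return 0;
    }

Compile with -pthread.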
     34//-----------------------------------------------------------------------------
     35// I/O
     36struct cluster;
     37struct $io_context;
     38struct $io_arbiter;
     39
     40struct io_context_params {
     41        int num_entries;
     42};
     43
     44void  ?{}(io_context_params & this);
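
The new io_context_params keeps only num_entries. Given the $io_context/$io_arbiter forward declarations here and the __io_data ring removed below, this parameter most plausibly sizes the kernel's io_uring instance; the liburing sketch below shows what such an entry count controls. It is an analogy only, assuming liburing is available, and is not the CFA runtime's internal code.

    #include <liburing.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        unsigned num_entries = 256;      /* analogue of io_context_params.num_entries */
        struct io_uring ring;

        /* The entry count fixes the size of the submission/completion rings
         * when the io context is created. */
        int ret = io_uring_queue_init(num_entries, &ring, 0);
        if (ret < 0) {
            fprintf(stderr, "io_uring_queue_init: %s\n", strerror(-ret));
            return 1;
        }

        io_uring_queue_exit(&ring);
        return 0;
    }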
    8945
    9046//-----------------------------------------------------------------------------
     
    9551struct __processor_id_t {
    9652        unsigned id:24;
    97         bool full_proc:1;
    9853
    9954        #if !defined(__CFA_NO_STATISTICS__)
     
    11469        struct cluster * cltr;
    11570
     71        // Ready Queue state per processor
     72        struct {
     73                unsigned short its;
     74                unsigned short itr;
     75                unsigned id;
     76                unsigned target;
     77                unsigned long long int cutoff;
     78        } rdq;
     79
    11680        // Set to true to notify that the processor should terminate
    11781        volatile bool do_terminate;
     
    12589        // Handle to pthreads
    12690        pthread_t kernel_thread;
     91
     92        struct {
     93                $io_context * ctx;
     94                bool pending;
     95                bool dirty;
     96        } io;
    12797
    12898        // Preemption data
     
    134104
    135105        // Idle lock (kernel semaphore)
    136         __bin_sem_t idle;
     106        int idle;
    137107
    138108        // Termination synchronisation (user semaphore)
     
    144114        // Link lists fields
    145115        DLISTED_MGD_IMPL_IN(processor)
     116
     117        // special init fields
     118        // This is needed for memcached integration
     119        // once memcached experiments are done this should probably be removed
     120        // it is not a particularly safe scheme as it can make processors less homogeneous
     121        struct {
     122                $thread * thrd;
     123        } init;
    146124
    147125        #if !defined(__CFA_NO_STATISTICS__)
     
    159137void ^?{}(processor & this);
    160138
    161 static inline void  ?{}(processor & this)                    { this{ "Anonymous Processor", *mainCluster}; }
    162 static inline void  ?{}(processor & this, struct cluster & cltr)    { this{ "Anonymous Processor", cltr}; }
    163 static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }
     139static inline void  ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster}; }
     140static inline void  ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
     141static inline void  ?{}(processor & this, const char name[])     { this{name, *mainCluster}; }
    164142
    165143DLISTED_MGD_IMPL_OUT(processor)
    166144
    167145//-----------------------------------------------------------------------------
    168 // I/O
    169 struct __io_data;
    170 
    171 // IO poller user-thread
    172 // Not using the "thread" keyword because we want to control
    173 // more carefully when to start/stop it
    174 struct $io_ctx_thread {
    175         struct __io_data * ring;
    176         single_sem sem;
    177         volatile bool done;
    178         $thread self;
    179 };
    180 
    181 
    182 struct io_context {
    183         $io_ctx_thread thrd;
    184 };
    185 
    186 struct io_context_params {
    187         int num_entries;
    188         int num_ready;
    189         int submit_aff;
    190         bool eager_submits:1;
    191         bool poller_submits:1;
    192         bool poll_submit:1;
    193         bool poll_complete:1;
    194 };
    195 
    196 void  ?{}(io_context_params & this);
    197 
    198 void  ?{}(io_context & this, struct cluster & cl);
    199 void  ?{}(io_context & this, struct cluster & cl, const io_context_params & params);
    200 void ^?{}(io_context & this);
    201 
    202 struct io_cancellation {
    203         __u64 target;
    204 };
    205 
    206 static inline void  ?{}(io_cancellation & this) { this.target = -1u; }
    207 static inline void ^?{}(io_cancellation &) {}
    208 bool cancel(io_cancellation & this);
    209 
    210 //-----------------------------------------------------------------------------
    211146// Cluster Tools
    212147
    213 // Intrusive lanes which are used by the relaxed ready queue
     148// Intrusive lanes which are used by the ready queue
    214149struct __attribute__((aligned(128))) __intrusive_lane_t;
    215150void  ?{}(__intrusive_lane_t & this);
    216151void ^?{}(__intrusive_lane_t & this);
    217152
    218 // Counter used to track whether or not the lanes are all empty
    219 struct __attribute__((aligned(128))) __snzi_node_t;
    220 struct __snzi_t {
    221         unsigned mask;
    222         int root;
    223         __snzi_node_t * nodes;
    224 };
    225 
    226 void  ?{}( __snzi_t & this, unsigned depth );
    227 void ^?{}( __snzi_t & this );
     153// Aligned timestamps which are used by the relaxed ready queue
     154struct __attribute__((aligned(128))) __timestamp_t;
     155void  ?{}(__timestamp_t & this);
     156void ^?{}(__timestamp_t & this);
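
The SNZI tree removed above was a scalable non-zero indicator telling processors whether any lane held work; it is replaced by one cache-aligned timestamp per lane. The hunk does not show how the timestamps are consumed (the per-processor rdq.target/rdq.cutoff fields added earlier presumably take part), but the sketch below illustrates one plausible policy in plain C: each lane records when it was last serviced, and a scanning processor favours the lane that has waited longest. The names and the selection rule are assumptions, and the rdtsc intrinsic assumes x86.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <x86intrin.h>                    /* __rdtsc() */

    #define NUM_LANES 16

    struct lane {
        /* ... intrusive queue of ready threads would live here ... */
        volatile uint64_t tsc;                /* cycle count of the last operation */
    };

    static struct lane lanes[NUM_LANES];

    /* Stamp a lane with the current cycle counter after touching it. */
    static void touch(struct lane * l) {
        l->tsc = __rdtsc();
    }

    /* Pick the lane whose last operation is oldest, i.e. the lane that has
     * gone the longest without service. */
    static size_t pick_oldest(void) {
        size_t best = 0;
        uint64_t best_tsc = lanes[0].tsc;
        for (size_t i = 1; i < NUM_LANES; i++) {
            if (lanes[i].tsc < best_tsc) {
                best_tsc = lanes[i].tsc;
                best = i;
            }
        }
        return best;
    }

    int main(void) {
        for (size_t i = 0; i < NUM_LANES; i++) touch(&lanes[i]);
        touch(&lanes[3]);                     /* lane 3 is now the most recent */
        printf("oldest lane: %zu\n", pick_oldest());
        return 0;
    }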
    228157
    229158//TODO adjust cache size to ARCHITECTURE
    230159// Structure holding the relaxed ready queue
    231160struct __ready_queue_t {
    232         // Data tracking how many/which lanes are used
    233         // Aligned to 128 for cache locality
    234         __snzi_t snzi;
    235 
    236161        // Data tracking the actual lanes
    237162        // On a separate cacheline from the used struct since
     
    242167                __intrusive_lane_t * volatile data;
    243168
     169                // Array of times
     170                __timestamp_t * volatile tscs;
     171
    244172                // Number of lanes (empty or not)
    245173                volatile size_t count;
     
    251179
    252180// Idle Sleep
    253 struct __cluster_idles {
     181struct __cluster_proc_list {
    254182        // Spin lock protecting the queue
    255183        volatile uint64_t lock;
     
    262190
    263191        // List of idle processors
    264         dlist(processor, processor) list;
     192        dlist(processor, processor) idles;
     193
     194        // List of active processors
     195        dlist(processor, processor) actives;
    265196};
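
The single idles list of the old __cluster_idles becomes a __cluster_proc_list that tracks both idle and active processors under the same spin lock. Below is a minimal C sketch of the intended hand-over, assuming ordinary intrusive doubly-linked lists and a test-and-set spin lock; it is illustrative only and not the CFA dlist implementation.

    #include <stdatomic.h>
    #include <stddef.h>

    struct processor {
        struct processor * prev, * next;      /* intrusive list links */
    };

    struct proc_list {
        atomic_flag lock;                     /* spin lock protecting both lists */
        struct processor * idles;             /* processors currently sleeping */
        struct processor * actives;           /* processors currently running */
    };

    static void spin_lock(atomic_flag * l)   { while (atomic_flag_test_and_set(l)) /* spin */ ; }
    static void spin_unlock(atomic_flag * l) { atomic_flag_clear(l); }

    static void push(struct processor ** head, struct processor * p) {
        p->prev = NULL;
        p->next = *head;
        if (*head) (*head)->prev = p;
        *head = p;
    }

    static void unlink_proc(struct processor ** head, struct processor * p) {
        if (p->prev) p->prev->next = p->next; else *head = p->next;
        if (p->next) p->next->prev = p->prev;
    }

    /* A processor with no work moves itself from actives to idles before
     * blocking; waking it up performs the opposite move. */
    static void mark_idle(struct proc_list * pl, struct processor * p) {
        spin_lock(&pl->lock);
        unlink_proc(&pl->actives, p);
        push(&pl->idles, p);
        spin_unlock(&pl->lock);
    }

    static void mark_awake(struct proc_list * pl, struct processor * p) {
        spin_lock(&pl->lock);
        unlink_proc(&pl->idles, p);
        push(&pl->actives, p);
        spin_unlock(&pl->lock);
    }

    int main(void) {
        struct proc_list cluster = { ATOMIC_FLAG_INIT, NULL, NULL };
        struct processor p = { NULL, NULL };
        push(&cluster.actives, &p);           /* a processor starts out active */
        mark_idle (&cluster, &p);
        mark_awake(&cluster, &p);
        return 0;
    }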
    266197
     
    278209
    279210        // List of idle processors
    280         __cluster_idles idles;
     211        __cluster_proc_list procs;
    281212
    282213        // List of threads
     
    292223
    293224        struct {
    294                 io_context * ctxs;
    295                 unsigned cnt;
     225                $io_arbiter * arbiter;
     226                io_context_params params;
    296227        } io;
    297228