source: libcfa/src/concurrency/kernel.hfa @ 1717b12

Last change on this file since 1717b12 was 454f478, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Re-arranged and commented low-level headers.
Main goal was better support of weakso locks that are coming.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel -- Header containing the core of the kernel API
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
        #include <bits/pthreadtypes.h>
        #include <pthread.h>
        #include <linux/types.h>
}

//-----------------------------------------------------------------------------
// Underlying Locks
#ifdef __CFA_WITH_VERIFY__
        extern bool __cfaabi_dbg_in_kernel();
#endif

extern "C" {
        char * strerror(int);
}
#define CHECKED(x) { int err = x; if( err != 0 ) abort("KERNEL ERROR: Operation \"" #x "\" returned error %d - %s\n", err, strerror(err)); }

struct __bin_sem_t {
        pthread_mutex_t         lock;
        pthread_cond_t          cond;
        int                     val;
};

static inline void ?{}(__bin_sem_t & this) with( this ) {
        // Create the mutex with error checking
        pthread_mutexattr_t mattr;
        pthread_mutexattr_init( &mattr );
        pthread_mutexattr_settype( &mattr, PTHREAD_MUTEX_ERRORCHECK_NP);
        pthread_mutex_init(&lock, &mattr);

        pthread_cond_init (&cond, (const pthread_condattr_t *)0p);  // workaround trac#208: cast should not be required
        val = 0;
}

static inline void ^?{}(__bin_sem_t & this) with( this ) {
        CHECKED( pthread_mutex_destroy(&lock) );
        CHECKED( pthread_cond_destroy (&cond) );
}

static inline void wait(__bin_sem_t & this) with( this ) {
        verify(__cfaabi_dbg_in_kernel());
        CHECKED( pthread_mutex_lock(&lock) );
                while(val < 1) {
                        pthread_cond_wait(&cond, &lock);
                }
                val -= 1;
        CHECKED( pthread_mutex_unlock(&lock) );
}

static inline bool post(__bin_sem_t & this) with( this ) {
        bool needs_signal = false;

        CHECKED( pthread_mutex_lock(&lock) );
                if(val < 1) {
                        val += 1;
                        pthread_cond_signal(&cond);
                        needs_signal = true;
                }
        CHECKED( pthread_mutex_unlock(&lock) );

        return needs_signal;
}

#undef CHECKED

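// Usage sketch (illustrative only, not part of the original header): __bin_sem_t is a
// pthread-based binary semaphore; one kernel thread blocks in wait() while another
// releases it with post(), which reports whether a wake-up signal was actually needed.
//
//   __bin_sem_t sem;                // default constructor sets up the mutex, condition, and val = 0
//   // sleeping side (must run inside the kernel, see the verify() in wait()):
//   wait( sem );                    // blocks until val reaches 1, then consumes it
//   // waking side:
//   bool signalled = post( sem );   // true if a sleeper had to be signalled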

//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
        unsigned id:24;
        bool full_proc:1;

        #if !defined(__CFA_NO_STATISTICS__)
                struct __stats_t * stats;
        #endif
};

coroutine processorCtx_t {
        struct processor * proc;
};

// Wrapper around a kernel thread
struct __attribute__((aligned(128))) processor {
        // Main state
        inline __processor_id_t;

        // Cluster from which to get threads
        struct cluster * cltr;

        // Set to true to notify the processor that it should terminate
        volatile bool do_terminate;

        // Coroutine context that keeps the state of the processor
        struct processorCtx_t runner;

        // Name of the processor
        const char * name;

        // Handle to pthreads
        pthread_t kernel_thread;

        // Preemption data
        // Node added to the discrete-event simulation
        struct alarm_node_t * preemption_alarm;

        // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
        bool pending_preemption;

        // Idle lock (kernel semaphore)
        __bin_sem_t idle;

        // Termination synchronisation (user semaphore)
        oneshot terminated;

        // pthread stack
        void * stack;

        // Linked-list fields
        DLISTED_MGD_IMPL_IN(processor)

        #if !defined(__CFA_NO_STATISTICS__)
                int print_stats;
                bool print_halts;
        #endif

#ifdef __CFA_DEBUG__
        // Last function to enable preemption on this processor
        const char * last_enable;
#endif
};

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void  ?{}(processor & this)                    { this{ "Anonymous Processor", *mainCluster}; }
static inline void  ?{}(processor & this, struct cluster & cltr)    { this{ "Anonymous Processor", cltr}; }
static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }
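
// Usage sketch (illustrative only, not part of the original header): a processor is a
// kernel thread that executes the user threads of a cluster; the anonymous forms above
// default to *mainCluster.
//
//   cluster workers{ "worker-cluster" };
//   processor p0{ "worker-0", workers };   // named processor on an explicit cluster
//   processor p1;                          // "Anonymous Processor" on *mainCluster
//   // leaving scope runs ^?{}, which is expected to stop and reclaim the kernel thread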

DLISTED_MGD_IMPL_OUT(processor)

//-----------------------------------------------------------------------------
// I/O
struct __io_data;

// IO poller user-thread
// Not using the "thread" keyword because we want to control
// more carefully when to start/stop it
struct $io_ctx_thread {
        struct __io_data * ring;
        single_sem sem;
        volatile bool done;
        $thread self;
};


struct io_context {
        $io_ctx_thread thrd;
};

struct io_context_params {
        int num_entries;
        int num_ready;
        int submit_aff;
        bool eager_submits:1;
        bool poller_submits:1;
        bool poll_submit:1;
        bool poll_complete:1;
};

void  ?{}(io_context_params & this);

void  ?{}(io_context & this, struct cluster & cl);
void  ?{}(io_context & this, struct cluster & cl, const io_context_params & params);
void ^?{}(io_context & this);
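
// Usage sketch (illustrative only, not part of the original header): an io_context is
// created on a cluster either with defaults or with tuned io_context_params; the exact
// meaning of each parameter is defined by the I/O implementation, so the values below
// are hypothetical.
//
//   cluster clus{ "io-cluster" };
//   io_context_params params;        // default-constructed parameters
//   params.num_entries = 256;        // hypothetical: request a larger ring
//   params.poller_submits = true;    // hypothetical: let the poller thread submit
//   io_context ctx{ clus, params };  // reclaimed by ^?{} when it goes out of scope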

struct io_cancellation {
        __u64 target;
};

static inline void  ?{}(io_cancellation & this) { this.target = -1u; }
static inline void ^?{}(io_cancellation &) {}
bool cancel(io_cancellation & this);

//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void  ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter (SNZI: Scalable Non-Zero Indicator) used to track whether the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
        unsigned mask;
        int root;
        __snzi_node_t * nodes;
};

void  ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );

//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
        // Data tracking how many/which lanes are used
        // Aligned to 128 for cache locality
        __snzi_t snzi;

        // Data tracking the actual lanes
        // On a separate cacheline from the used struct since
        // used can change on each push/pop but this data
        // only changes on shrink/grow
        struct {
                // Array of lanes
                __intrusive_lane_t * volatile data;

                // Number of lanes (empty or not)
                volatile size_t count;
        } lanes;
};

void  ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);

// Idle Sleep
struct __cluster_idles {
        // Spin lock protecting the queue
        volatile uint64_t lock;

        // Total number of processors
        unsigned total;

        // Total number of idle processors
        unsigned idle;

        // List of idle processors
        dlist(processor, processor) list;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
        // Ready queue for threads
        __ready_queue_t ready_queue;

        // Name of the cluster
        const char * name;

        // Preemption rate on this cluster
        Duration preemption_rate;

        // List of idle processors
        __cluster_idles idles;

        // List of threads
        __spinlock_t thread_list_lock;
        __dllist_t(struct $thread) threads;
        unsigned int nthreads;

        // Linked-list fields
        struct __dbg_node_cltr {
                cluster * next;
                cluster * prev;
        } node;

        struct {
                io_context * ctxs;
                unsigned cnt;
        } io;

        #if !defined(__CFA_NO_STATISTICS__)
                struct __stats_t * stats;
                int print_stats;
        #endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this)                                            { io_context_params default_params;    this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate)                  { io_context_params default_params;    this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[])                         { io_context_params default_params;    this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io)                           { io_context_params default_params;    this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params;    this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io)        { io_context_params default_params;    this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params)                                            { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params)                  { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params)                         { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params)                           { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params)        { this{name, default_preemption(), num_io, io_params}; }
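
// Usage sketch (illustrative only, not part of the original header): every inline form
// above forwards to the full constructor declared earlier; an explicit call plus a few
// processors to drive the cluster might look like:
//
//   io_context_params io_params;                                      // default parameters
//   cluster compute{ "compute", default_preemption(), 2, io_params }; // name, rate, 2 io contexts
//   processor p0{ "compute-0", compute };
//   processor p1{ "compute-1", compute };
//   // user threads created on this cluster are then scheduled across p0 and p1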

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
        void print_stats_now( cluster & this, int flags );

        static inline void print_stats_at_exit( cluster & this, int flags ) {
                this.print_stats |= flags;
        }

        static inline void print_stats_at_exit( processor & this, int flags ) {
                this.print_stats |= flags;
        }

        void print_halts( processor & this );
#endif
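
// Usage sketch (illustrative only, not part of the original header): statistics can be
// printed immediately or accumulated as flags and printed when the cluster or processor
// is destroyed; the flag constant below is hypothetical and stands in for whatever
// values the statistics implementation defines.
//
//   #if !defined(__CFA_NO_STATISTICS__)
//       cluster clus{ "monitored" };
//       print_stats_at_exit( clus, SOME_STATS_FLAGS );   // ORed into clus.print_stats, printed at ^?{}
//       print_stats_now( clus, SOME_STATS_FLAGS );       // print right away
//   #endif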

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //