source: libcfa/src/concurrency/kernel.hfa @ 4598e03

Last change on this file since 4598e03 was 262fafd9, checked in by Thierry Delisle <tdelisle@…>, 2 years ago

Added debugging information to help find deadlock.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel -- Header containing the core of the kernel API
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
    #include <bits/pthreadtypes.h>
    #include <pthread.h>
    #include <linux/types.h>
}

#ifdef __CFA_WITH_VERIFY__
    extern bool __cfaabi_dbg_in_kernel();
#endif

//-----------------------------------------------------------------------------
// I/O
struct cluster;
struct $io_context;
struct $io_arbiter;

struct io_context_params {
    int num_entries;
};

void  ?{}(io_context_params & this);

//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Coroutine used by processors for the 2-step context switch
coroutine processorCtx_t {
    struct processor * proc;
};

struct io_future_t;

// Information needed for idle sleep
struct __fd_waitctx {
    // Semaphore/future-like object whose value can be 0, 1, or a file descriptor:
    // 0  - the default state
    // 1  - the proc should wake up immediately
    // FD - the proc is going to sleep and should be woken by writing to the FD
    volatile int sem;

    // The event FD that corresponds to this processor
    int evfd;

    // Buffer into which the proc reads from evfd;
    // unused unless io_uring is used for idle sleep
    void * rdbuf;

    // Future used to track the read of the eventfd;
    // unused unless io_uring is used for idle sleep
    io_future_t * ftr;

    volatile unsigned long long wake_time;
    volatile unsigned long long sleep_time;
};
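
// Illustrative sketch of how a waker might use the 'sem' protocol described above
// (an assumption drawn from the field comments; the real logic lives in the kernel
// .cfa sources, and 'wake_sketch' is a hypothetical name, not part of this API):
//
//     static inline void wake_sketch( __fd_waitctx & ctx ) {
//         int expected = 0;
//         // default state: flag an immediate wake-up before the proc actually sleeps
//         if ( __atomic_compare_exchange_n( &ctx.sem, &expected, 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) return;
//         if ( expected > 1 ) {
//             // 'sem' already holds the FD the sleeping proc blocks on: write to wake it
//             unsigned long long buf = 1;            // eventfd counters are 8 bytes
//             write( expected, &buf, sizeof(buf) );
//         }
//     }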

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
    // Cluster from which to get threads
    struct cluster * cltr;

    // Ready-queue state per processor
    struct {
        unsigned short its;
        unsigned short itr;
        unsigned id;
        unsigned target;
        unsigned last;
        signed   cpu;
    } rdq;

    // Set to true to notify the processor that it should terminate
    volatile bool do_terminate;

    // Coroutine ctx which keeps the state of the processor
    struct processorCtx_t runner;

    // Name of the processor
    const char * name;

    // Handle to the underlying pthread
    pthread_t kernel_thread;

    // Unique id for the processor (not per cluster)
    unsigned unique_id;

    struct {
        $io_context * ctx;
        unsigned target;
        volatile bool pending;
        volatile bool dirty;
    } io;

    // Preemption data
    // Node added to the discrete event simulation
    struct alarm_node_t * preemption_alarm;

    // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
    bool pending_preemption;

    // Context for idle sleep
    struct __fd_waitctx idle_wctx;

    // Termination synchronisation (user semaphore)
    oneshot terminated;

    // pthread stack
    void * stack;

    // Linked-list fields
    inline dlink(processor);

    // Special init fields, needed for memcached integration.
    // Once the memcached experiments are done this should probably be removed;
    // it is not a particularly safe scheme as it can make processors less homogeneous.
    struct {
        thread$ * thrd;
    } init;

    struct KernelThreadData * local_data;

    #if !defined(__CFA_NO_STATISTICS__)
        int print_stats;
        bool print_halts;
    #endif

#ifdef __CFA_DEBUG__
    // Last function to enable preemption on this processor
    const char * last_enable;
#endif
};
P9_EMBEDDED( processor, dlink(processor) )

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void  ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster}; }
static inline void  ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void  ?{}(processor & this, const char name[])     { this{name, *mainCluster}; }
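
// Usage sketch (illustrative only, not part of this header):
//
//     processor p0;                      // anonymous processor running threads from mainCluster
//     processor p1 = { "worker-1" };     // named processor, also on mainCluster
//     // each processor wraps one kernel thread; presumably the destructor ^?{} asks it to
//     // terminate (do_terminate) and synchronises on 'terminated' before returning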

//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes used by the ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void  ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Aligned timestamps used by the ready queue and the I/O subsystem
struct __attribute__((aligned(128))) __timestamp_t {
    volatile unsigned long long tv;
    volatile unsigned long long ma;
};

static inline void  ?{}(__timestamp_t & this) { this.tv = 0; this.ma = 0; }
static inline void ^?{}(__timestamp_t &) {}


struct __attribute__((aligned(16))) __cache_id_t {
    volatile unsigned id;
};

// Idle Sleep
struct __cluster_proc_list {
    // Spin lock protecting the queue
    __spinlock_t lock;

    // Wait context (FD) used to wake a processor
    struct __fd_waitctx * volatile fdw;

    // Total number of processors
    unsigned total;

    // Total number of idle processors
    unsigned idle;

    // List of idle processors
    dlist(processor) idles;

    // List of active processors
    dlist(processor) actives;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
    struct {
        struct {
            // Array of subqueues
            __intrusive_lane_t * data;

            // Time since the subqueues were last processed
            __timestamp_t * tscs;

            // Number of subqueues / timestamps
            size_t count;
        } readyQ;

        struct {
            // Array of $io_context pointers
            $io_context ** data;

            // Time since the I/O subqueues were last processed
            __timestamp_t * tscs;

            // Number of I/O subqueues
            size_t count;
        } io;

        // Cache each kernel thread belongs to
        __cache_id_t * caches;
    } sched;

    // // Ready queue for threads
    // __ready_queue_t ready_queue;

    // Name of the cluster
    const char * name;

    // Preemption rate on this cluster
    Duration preemption_rate;

    // Lists of idle and active processors
    __cluster_proc_list procs;

    // List of threads
    __spinlock_t thread_list_lock;
    __dllist_t(struct thread$) threads;
    unsigned int nthreads;

    // Linked-list fields
    struct __dbg_node_cltr {
        cluster * next;
        cluster * prev;
    } node;

    struct {
        $io_arbiter * arbiter;
        io_context_params params;
    } io;

    #if !defined(__CFA_NO_STATISTICS__)
        struct __stats_t * stats;
        int print_stats;
    #endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this)                                            { io_context_params default_params;    this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate)                  { io_context_params default_params;    this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[])                         { io_context_params default_params;    this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io)                           { io_context_params default_params;    this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params;    this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io)        { io_context_params default_params;    this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params)                                            { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params)                  { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params)                         { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params)                           { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params)        { this{name, default_preemption(), num_io, io_params}; }
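
// Usage sketch (illustrative only; 'myCluster' and the worker names are hypothetical):
//
//     io_context_params params;
//     params.num_entries = 256;                          // presumably the ring size used for each $io_context
//     cluster myCluster = { "io-cluster", 2, params };   // 2 I/O contexts with custom parameters
//     processor w0 = { "w0", myCluster };
//     processor w1 = { "w1", myCluster };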

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
    void print_stats_now( cluster & this, int flags );

    static inline void print_stats_at_exit( cluster & this, int flags ) {
        this.print_stats |= flags;
    }

    static inline void print_stats_at_exit( processor & this, int flags ) {
        this.print_stats |= flags;
    }

    void print_halts( processor & this );
#endif
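
// Usage sketch (illustrative; 'CFA_STATS_READY_Q' stands in for whatever flag
// constants the statistics subsystem defines elsewhere):
//
//     cluster myCluster = { "stats-cluster" };
//     print_stats_at_exit( myCluster, CFA_STATS_READY_Q );   // accumulate flags, dump when the cluster is destroyed
//     // print_stats_now( myCluster, CFA_STATS_READY_Q );    // or dump immediately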

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //