source: libcfa/src/concurrency/kernel.hfa@ 7ab28b69

ADT arm-eh ast-experimental enum forall-pointer-decay jacob/cs343-translation new-ast-unique-expr pthread-emulation qualifiedEnum
Last change on this file since 7ab28b69 was 58d64a4, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

processor_id_t now have a bool to state if it is a full processor.
kernelTLS.this_thread now has much tighter lifetime.
Remove warning in kernel.cfa

  • Property mode set to 100644
File size: 9.0 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// kernel --
8//
9// Author : Thierry Delisle
10// Created On : Tue Jan 17 12:27:26 2017
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Tue Feb 4 12:29:26 2020
13// Update Count : 22
14//
15
16#pragma once
17
18#include "invoke.h"
19#include "time_t.hfa"
20#include "coroutine.hfa"
21
22#include "containers/list.hfa"
23
24extern "C" {
25 #include <bits/pthreadtypes.h>
26 #include <linux/types.h>
27}
28
29//-----------------------------------------------------------------------------
30// Locks
// Counting semaphore used for user-level thread synchronisation.
// Blocked threads are parked on the intrusive 'waiting' queue rather than spinning.
struct semaphore {
	// Spinlock protecting 'count' and 'waiting'
	__spinlock_t lock;
	// Semaphore counter; negative values presumably encode the number of waiters — TODO confirm
	int count;
	// Queue of $thread objects blocked on this semaphore
	__queue_t($thread) waiting;
};

// Construct with an initial count (defaults to 1, i.e. a binary semaphore / mutex)
void ?{}(semaphore & this, int count = 1);
// Destructor
void ^?{}(semaphore & this);
// Acquire; returns whether the caller blocked — TODO confirm return semantics against kernel.cfa
bool P (semaphore & this);
// Release one waiter; returns whether a thread was woken — TODO confirm return semantics
bool V (semaphore & this);
// Release up to 'count' waiters
bool V (semaphore & this, unsigned count);
42
43
//-----------------------------------------------------------------------------
// Processor
// Cluster created at startup that anonymous processors and clusters default to
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
	// Compact id used by the ready-queue / scheduler (24-bit bitfield)
	unsigned id:24;
	// True when this id belongs to a full processor (kernel thread),
	// false for other id holders (e.g. I/O pollers) — see changeset 58d64a4
	bool full_proc:1;

	#if !defined(__CFA_NO_STATISTICS__)
		// Per-processor statistics block (absent when stats are compiled out)
		struct __stats_t * stats;
	#endif
};
57
// Coroutine that runs the scheduling loop of a processor; 'proc' points back
// to the owning processor.
coroutine processorCtx_t {
	struct processor * proc;
};
61
// Wrapper around kernel threads
// Aligned to 128 bytes to keep each processor on its own cache lines.
struct __attribute__((aligned(128))) processor {
	// Main state
	// Inherited scheduler id (Cforall inline inheritance: fields are embedded)
	inline __processor_id_t;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Set to true to notify the processor should terminate
	volatile bool do_terminate;

	// Coroutine context that keeps the state of the processor
	struct processorCtx_t runner;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// RunThread data
	// Action to do after a thread is run
	// (thread to destroy once it is safely off this processor's stack)
	$thread * destroyer;

	// Preemption data
	// Node which is added in the discrete event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore): processor blocks here when it has no work
	__bin_sem_t idle;

	// Termination synchronisation (user semaphore): V'ed when the kernel thread exits
	semaphore terminated;

	// pthread Stack (storage backing kernel_thread)
	void * stack;

	// Link lists fields (intrusive dlist hooks, see containers/list.hfa)
	DLISTED_MGD_IMPL_IN(processor)

	#if !defined(__CFA_NO_STATISTICS__)
		// Bitmask of statistics to print at exit (see print_stats_at_exit)
		int print_stats;
		// Whether to trace halt/wake events for this processor
		bool print_halts;
	#endif

#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor
	const char * last_enable;
#endif
};
115
// Construct a processor named 'name' attached to cluster 'cltr'
void ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

// Convenience constructors: all delegate to the two-argument form above,
// defaulting the name to "Anonymous Processor" and/or the cluster to mainCluster.
static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

// Out-of-struct companion of DLISTED_MGD_IMPL_IN above (intrusive-list accessors)
DLISTED_MGD_IMPL_OUT(processor)
124
//-----------------------------------------------------------------------------
// I/O
struct __io_data;

// IO poller user-thread
// Not using the "thread" keyword because we want to control
// more carefully when to start/stop it
struct $io_ctx_thread {
	// Underlying io_uring state — presumably; defined in the I/O implementation, TODO confirm
	struct __io_data * ring;
	// Semaphore used to park/wake the poller thread
	single_sem sem;
	// Set to request poller shutdown
	volatile bool done;
	// The manually-managed thread object itself
	$thread self;
};
138
139
// Public handle for one I/O context: wraps the poller thread and its ring.
struct io_context {
	$io_ctx_thread thrd;
};
143
// Tuning knobs for constructing an io_context.
struct io_context_params {
	// Number of entries in the submission/completion rings
	int num_entries;
	// Number of "ready" entries — TODO confirm exact meaning against the I/O implementation
	int num_ready;
	// Submission affinity setting — TODO confirm semantics
	int submit_aff;
	// Mutually-tuning submission/polling strategies (single-bit flags):
	bool eager_submits:1;
	bool poller_submits:1;
	bool poll_submit:1;
	bool poll_complete:1;
};
153
// Initialise params with default values
void ?{}(io_context_params & this);

// Construct an I/O context bound to cluster 'cl', with default or explicit params
void ?{}(io_context & this, struct cluster & cl);
void ?{}(io_context & this, struct cluster & cl, const io_context_params & params);
void ^?{}(io_context & this);
159
// Token identifying an in-flight I/O operation so it can be cancelled.
struct io_cancellation {
	// Id of the operation to cancel; all-ones means "no target"
	__u64 target;
};

// NOTE(review): -1u is the 32-bit all-ones value (0xFFFFFFFF); assigned to a
// __u64 it does NOT produce 64-bit all-ones. Confirm whether (__u64)-1 was intended.
static inline void ?{}(io_cancellation & this) { this.target = -1u; }
static inline void ^?{}(io_cancellation & this) {}
// Attempt to cancel the operation; returns whether cancellation succeeded — TODO confirm
bool cancel(io_cancellation & this);
167
//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes which are used by the relaxed ready queue
// (opaque here; aligned to 128 bytes to avoid false sharing between lanes)
struct __attribute__((aligned(128))) __intrusive_lane_t;
void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);
175
// Counter used for whether or not the lanes are all empty
// SNZI = Scalable Non-Zero Indicator tree; nodes are opaque and cache-aligned.
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
	// Mask used to map a lane index onto a leaf node — TODO confirm
	unsigned mask;
	// Index of the root node in 'nodes'
	int root;
	// Flat array holding the SNZI tree nodes
	__snzi_node_t * nodes;
};

// Build a SNZI tree of the given depth / tear it down
void ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );
186
//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
	// Data tracking how many/which lanes are used
	// Aligned to 128 for cache locality
	__snzi_t snzi;

	// Data tracking the actual lanes
	// On a separate cacheline from the used struct since
	// used can change on each push/pop but this data
	// only changes on shrink/grow
	struct {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;
};

void ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);
209
// Idle Sleep
// Book-keeping for processors of a cluster that have gone to sleep.
struct __cluster_idles {
	// Spin lock protecting the queue
	volatile uint64_t lock;

	// Total number of processors
	unsigned total;

	// Total number of idle processors
	unsigned idle;

	// List of idle processors
	dlist(processor, processor) list;
};
224
//-----------------------------------------------------------------------------
// Cluster
// A cluster groups processors, their shared ready queue, and their I/O contexts.
// Aligned to 128 bytes to keep hot scheduler state on dedicated cache lines.
struct __attribute__((aligned(128))) cluster {
	// Ready queue for threads
	__ready_queue_t ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of idle processors
	__cluster_idles idles;

	// List of threads (protected by thread_list_lock)
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Link lists fields (global debug list of clusters)
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	// I/O contexts owned by this cluster ('cnt' entries in 'ctxs')
	struct {
		io_context * ctxs;
		unsigned cnt;
	} io;

	#if !defined(__CFA_NO_STATISTICS__)
		// Cluster-wide statistics and the flags controlling their printing
		struct __stats_t * stats;
		int print_stats;
	#endif
};
// Default preemption rate used by the convenience constructors below
extern Duration default_preemption();

// Full constructor; all other cluster constructors delegate here
void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);
265
// Convenience constructors covering every subset of {name, preemption_rate,
// num_io, io_params}; all delegate to the full four-argument constructor,
// defaulting name to "Anonymous Cluster", rate to default_preemption(),
// num_io to 1, and io_params to a default-constructed io_context_params.
static inline void ?{} (cluster & this) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[]) { io_context_params default_params; this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io) { io_context_params default_params; this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params) { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params) { this{name, default_preemption(), num_io, io_params}; }
278
279static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }
280
// Current processor / cluster from kernel TLS. UNSAFE: the value may be stale
// as soon as it is read if the thread migrates — use only where that is acceptable.
static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
static inline struct cluster * active_cluster () { return TL_GET( this_processor )->cltr; }
283
#if !defined(__CFA_NO_STATISTICS__)
	// Request that this cluster print the statistics selected by 'flags' at exit.
	// Flags accumulate (bitwise-or) across calls.
	static inline void print_stats_at_exit( cluster & this, int flags ) {
		this.print_stats |= flags;
	}

	// Same, for a single processor.
	static inline void print_stats_at_exit( processor & this, int flags ) {
		this.print_stats |= flags;
	}

	// Enable halt/wake tracing for this processor (see processor.print_halts)
	void print_halts( processor & this );
#endif
295
296// Local Variables: //
297// mode: c //
298// tab-width: 4 //
299// End: //
Note: See TracBrowser for help on using the repository browser.