source: libcfa/src/concurrency/kernel.hfa@ 7dafb7b

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel --
//
// Author : Thierry Delisle
// Created On : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb 4 12:29:26 2020
// Update Count : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
	#include <bits/pthreadtypes.h>
	#include <linux/types.h>
}

//-----------------------------------------------------------------------------
// Locks
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t($thread) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);
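
// Example (illustrative sketch only, not part of the original header): a
// semaphore constructed with a count of 0 and used as a simple hand-off
// between two threads. The functions give()/take() are hypothetical; the
// assumption that P blocks when the count is 0 and that V unblocks a waiter
// follows from conventional P/V semantics, and the bool results are ignored.
//
//   semaphore hand_off = { 0 };          // count of 0 => first P blocks
//
//   void give() {
//       // ... produce something ...
//       V( hand_off );                   // unblock one waiter, if any
//   }
//
//   void take() {
//       P( hand_off );                   // block until a matching V
//       // ... consume it ...
//   }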


//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
	unsigned id:24;
	bool full_proc:1;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
	#endif
};

coroutine processorCtx_t {
	struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
	// Main state
	inline __processor_id_t;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Set to true to notify the processor that it should terminate
	volatile bool do_terminate;

	// Coroutine context that keeps the state of the processor
	struct processorCtx_t runner;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// Preemption data
	// Node which is added to the discrete-event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore)
	__bin_sem_t idle;

	// Termination synchronisation (user semaphore)
	semaphore terminated;

	// pthread stack
	void * stack;

	// Linked-list fields
	DLISTED_MGD_IMPL_IN(processor)

	#if !defined(__CFA_NO_STATISTICS__)
		int print_stats;
		bool print_halts;
	#endif

#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor
	const char * last_enable;
#endif
};

void ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

DLISTED_MGD_IMPL_OUT(processor)
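
// Example (illustrative sketch only, not part of the original header): adding
// kernel threads to the runtime by declaring processor objects. The block and
// variable names are hypothetical; the presumption that the destructor waits
// for the processor to finish is based on the `terminated` semaphore above.
//
//   {
//       processor procs[2];                    // two extra processors on mainCluster
//       processor named = { "Worker" };        // a named processor, also on mainCluster
//       // user threads created while these exist can run on any of them
//   } // ^?{} presumably blocks until each processor has terminated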

//-----------------------------------------------------------------------------
// I/O
struct __io_data;

// IO poller user-thread
// Not using the "thread" keyword because we want to control
// more carefully when to start/stop it
struct $io_ctx_thread {
	struct __io_data * ring;
	single_sem sem;
	volatile bool done;
	$thread self;
};


struct io_context {
	$io_ctx_thread thrd;
};

struct io_context_params {
	int num_entries;
	int num_ready;
	int submit_aff;
	bool eager_submits:1;
	bool poller_submits:1;
	bool poll_submit:1;
	bool poll_complete:1;
};

void ?{}(io_context_params & this);

void ?{}(io_context & this, struct cluster & cl);
void ?{}(io_context & this, struct cluster & cl, const io_context_params & params);
void ^?{}(io_context & this);
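
// Example (illustrative sketch only, not part of the original header):
// adjusting the I/O parameters before attaching an io_context to a cluster.
// `some_cluster` is a hypothetical pre-existing cluster, the chosen values
// are placeholders, and the meaning of each field is inferred from its name.
//
//   io_context_params params;                   // default-constructed parameters
//   params.num_entries    = 256;                // placeholder ring size
//   params.poller_submits = true;               // have the poller thread perform submissions
//
//   io_context ctx = { some_cluster, params };  // attach a context to an existing cluster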

struct io_cancellation {
	__u64 target;
};

static inline void ?{}(io_cancellation & this) { this.target = -1u; }
static inline void ^?{}(io_cancellation &) {}
bool cancel(io_cancellation & this);

//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter (scalable non-zero indicator) used to track whether or not the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
	unsigned mask;
	int root;
	__snzi_node_t * nodes;
};

void ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );

//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
	// Data tracking how many/which lanes are used
	// Aligned to 128 for cache locality
	__snzi_t snzi;

	// Data tracking the actual lanes
	// On a separate cacheline from the snzi field since
	// the snzi can change on each push/pop but this data
	// only changes on shrink/grow
	struct {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;
};

void ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);

// Idle Sleep
struct __cluster_idles {
	// Spin lock protecting the queue
	volatile uint64_t lock;

	// Total number of processors
	unsigned total;

	// Total number of idle processors
	unsigned idle;

	// List of idle processors
	dlist(processor, processor) list;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
	// Ready queue for threads
	__ready_queue_t ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of idle processors
	__cluster_idles idles;

	// List of threads
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Linked-list fields
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	struct {
		io_context * ctxs;
		unsigned cnt;
	} io;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
		int print_stats;
	#endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[]) { io_context_params default_params; this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io) { io_context_params default_params; this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params) { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params) { this{name, default_preemption(), num_io, io_params}; }
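
// Example (illustrative sketch only, not part of the original header):
// creating a dedicated cluster and giving it its own kernel threads, using
// only constructors declared above. The names "Background", "Background-0",
// and "Background-1" are hypothetical.
//
//   cluster background = { "Background" };          // default preemption rate, one io_context
//   processor p0 = { "Background-0", background };  // kernel threads serving this cluster
//   processor p1 = { "Background-1", background };
//   // user threads placed on `background` are scheduled only on p0/p1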

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster * active_cluster () { return publicTLS_get( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
	void print_stats_now( cluster & this, int flags );

	static inline void print_stats_at_exit( cluster & this, int flags ) {
		this.print_stats |= flags;
	}

	static inline void print_stats_at_exit( processor & this, int flags ) {
		this.print_stats |= flags;
	}

	void print_halts( processor & this );
#endif
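
// Example (illustrative sketch only, not part of the original header):
// requesting a statistics dump for a cluster. The flag bits are defined by
// the statistics module, not in this header, so `flags` is left abstract;
// `background` is the hypothetical cluster from the example above.
//
//   #if !defined(__CFA_NO_STATISTICS__)
//       int flags = ...;                           // bits defined by the stats module
//       print_stats_now( background, flags );      // print immediately
//       print_stats_at_exit( background, flags );  // or defer until the cluster is destroyed
//   #endif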

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //