source: libcfa/src/concurrency/kernel.hfa@ cd59d28

Last change on this file since cd59d28 was 5cb51502, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Fix stats so they are correctly tallied when called from outside the cluster.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel -- Header containing the core of the kernel API
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb 4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
    #include <bits/pthreadtypes.h>
    #include <pthread.h>
    #include <linux/types.h>
}

#ifdef __CFA_WITH_VERIFY__
    extern bool __cfaabi_dbg_in_kernel();
#endif

//-----------------------------------------------------------------------------
// I/O
struct cluster;
struct $io_context;
struct $io_arbiter;

struct io_context_params {
    int num_entries;
};

void ?{}(io_context_params & this);

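// Example (sketch): customising the io_context parameters before handing them to
// one of the cluster constructors declared further down in this header. The
// identifier `params`, the value 256, and the cluster name are illustrative only.
//   io_context_params params;               // default-constructed
//   params.num_entries = 256;               // illustrative entry count for the io context
//   cluster io_cl = { "io-cluster", params };
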
//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
    unsigned id:24;

    #if !defined(__CFA_NO_STATISTICS__)
        struct __stats_t * stats;
    #endif
};

coroutine processorCtx_t {
    struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
    // Main state
    inline __processor_id_t;

    // Cluster from which to get threads
    struct cluster * cltr;

    // Id within the cluster
    unsigned cltr_id;

    // Set to true to notify the processor that it should terminate
    volatile bool do_terminate;

    // Coroutine ctx that keeps the state of the processor
    struct processorCtx_t runner;

    // Name of the processor
    const char * name;

    // Handle to pthreads
    pthread_t kernel_thread;

    struct {
        $io_context * ctx;
        bool pending;
        bool dirty;
    } io;

    // Preemption data
    // Node which is added to the discrete-event simulation
    struct alarm_node_t * preemption_alarm;

    // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
    bool pending_preemption;

    // Idle lock (kernel semaphore)
    int idle;

    // Termination synchronisation (user semaphore)
    oneshot terminated;

    // pthread Stack
    void * stack;

    // Linked-list fields
    DLISTED_MGD_IMPL_IN(processor)

    // Special init fields.
    // This is needed for memcached integration;
    // once the memcached experiments are done this should probably be removed.
    // It is not a particularly safe scheme as it can make processors less homogeneous.
    struct {
        $thread * thrd;
    } init;

    #if !defined(__CFA_NO_STATISTICS__)
        int print_stats;
        bool print_halts;
    #endif

#ifdef __CFA_DEBUG__
    // Last function to enable preemption on this processor
    const char * last_enable;
#endif
};

void ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster}; }

DLISTED_MGD_IMPL_OUT(processor)

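// Example (sketch): the constructors above allow a processor to be started
// anonymously on mainCluster, or by name on a user cluster; the identifiers
// `net` and `worker` are illustrative only.
//   processor p;                              // "Anonymous Processor" on mainCluster
//   cluster net = { "net" };
//   processor worker = { "net-worker", net }; // extra kernel thread serving cluster `net`
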
//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter (a scalable non-zero indicator, SNZI) used to track whether or not the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
    unsigned mask;
    int root;
    __snzi_node_t * nodes;
};

void ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );

// TODO: adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
    // Data tracking how many/which lanes are used
    // Aligned to 128 for cache locality
    __snzi_t snzi;

    // Data tracking the actual lanes
    // On a separate cacheline from the used struct since
    // used can change on each push/pop but this data
    // only changes on shrink/grow
    struct {
        // Array of lanes
        __intrusive_lane_t * volatile data;

        // Number of lanes (empty or not)
        volatile size_t count;
    } lanes;
};

void ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);

// Idle Sleep
struct __cluster_idles {
    // Spin lock protecting the queue
    volatile uint64_t lock;

    // Total number of processors
    unsigned total;

    // Total number of idle processors
    unsigned idle;

    // List of idle processors
    dlist(processor, processor) list;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
    // Ready queue for threads
    __ready_queue_t ready_queue;

    // Name of the cluster
    const char * name;

    // Preemption rate on this cluster
    Duration preemption_rate;

    // List of idle processors
    __cluster_idles idles;

    // List of threads
    __spinlock_t thread_list_lock;
    __dllist_t(struct $thread) threads;
    unsigned int nthreads;

    // Linked-list fields
    struct __dbg_node_cltr {
        cluster * next;
        cluster * prev;
    } node;

    struct {
        $io_arbiter * arbiter;
        io_context_params params;
    } io;

    #if !defined(__CFA_NO_STATISTICS__)
        struct __stats_t * stats;
        int print_stats;
    #endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[]) { io_context_params default_params; this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io) { io_context_params default_params; this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params) { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params) { this{name, default_preemption(), num_io, io_params}; }

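// Example (sketch): a few of the overloads above; the names, the 10`ms preemption
// rate, and the io-context count are illustrative only.
//   cluster a;                               // all defaults
//   cluster b = { 10`ms };                   // custom preemption rate
//   cluster c = { "batch", 4 };              // named, with 4 io contexts
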
static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

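// Example (sketch): querying the executing context, e.g. from inside a thread body;
// the printing is illustrative and would additionally need fstream.hfa.
//   cluster * cl = active_cluster();
//   sout | "running on cluster" | cl->name;
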
#if !defined(__CFA_NO_STATISTICS__)
    void print_stats_now( cluster & this, int flags );

    static inline void print_stats_at_exit( cluster & this, int flags ) {
        this.print_stats |= flags;
    }

    static inline void print_stats_at_exit( processor & this, int flags ) {
        this.print_stats |= flags;
    }

    void print_halts( processor & this );
#endif

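// Example (sketch): requesting statistics output; `flags` stands in for whichever
// statistic categories the build defines (they are not listed in this header).
//   print_stats_at_exit( *mainCluster, flags );   // mark stats for printing at shutdown
//   print_stats_now( *mainCluster, flags );       // print immediately
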
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //