source: libcfa/src/concurrency/kernel.hfa@ 5a46e09

Last change on this file since 5a46e09 was e2f601f, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Merge branch 'master' of plg.uwaterloo.ca:software/cfa/cfa-cc

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel -- Header containing the core of the kernel API
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
    #include <bits/pthreadtypes.h>
    #include <pthread.h>
    #include <linux/types.h>
}

#ifdef __CFA_WITH_VERIFY__
    extern bool __cfaabi_dbg_in_kernel();
#endif

//-----------------------------------------------------------------------------
// I/O
struct cluster;
struct $io_context;
struct $io_arbiter;

struct io_context_params {
    int num_entries;
};

void ?{}(io_context_params & this);

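// Usage sketch (illustrative only, not part of the kernel API): an
// io_context_params is default-constructed and then tuned by hand before being
// handed to one of the cluster constructors declared later in this header.
// The names `params` and `io_cl` and the value 256 are hypothetical.
//
//     io_context_params params;                   // default-constructed via ?{} above
//     params.num_entries = 256;                   // requested number of I/O-context entries
//     cluster io_cl = { "io-cluster", params };   // see the cluster constructors below
//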
//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads

coroutine processorCtx_t {
    struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
    // Cluster from which to get threads
    struct cluster * cltr;

    // Ready-queue state, per processor
    struct {
        unsigned short its;
        unsigned short itr;
        unsigned id;
        unsigned target;
        unsigned long long int cutoff;
    } rdq;

    // Set to true to notify the processor that it should terminate
    volatile bool do_terminate;

    // Coroutine context that keeps the state of the processor
    struct processorCtx_t runner;

    // Name of the processor
    const char * name;

    // Handle to the underlying pthread
    pthread_t kernel_thread;

    // Unique id for the processor (global, not per cluster)
    unsigned unique_id;

    struct {
        $io_context * ctx;
        bool pending;
        bool dirty;
    } io;

    // Preemption data
    // Node which is added to the discrete-event simulation
    struct alarm_node_t * preemption_alarm;

    // If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
    bool pending_preemption;

    // Idle lock (kernel semaphore)
    int idle;

    // Termination synchronisation (user semaphore)
    oneshot terminated;

    // pthread stack
    void * stack;

    // Linked-list fields
    inline dlink(processor);

    // Special init fields.
    // This is needed for the memcached integration; once the memcached experiments
    // are done this should probably be removed.  It is not a particularly safe
    // scheme as it can make processors less homogeneous.
    struct {
        $thread * thrd;
    } init;

    struct KernelThreadData * local_data;

    #if !defined(__CFA_NO_STATISTICS__)
        int print_stats;
        bool print_halts;
    #endif

#ifdef __CFA_DEBUG__
    // Last function to enable preemption on this processor
    const char * last_enable;
#endif
};
P9_EMBEDDED( processor, dlink(processor) )

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void ?{}(processor & this)                        { this{ "Anonymous Processor", *mainCluster }; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr }; }
static inline void ?{}(processor & this, const char name[])     { this{ name, *mainCluster }; }

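// Usage sketch (illustrative only, not part of this header): a processor is a
// kernel thread bound to a cluster for its lifetime; it starts scheduling work
// at construction and is torn down by its destructor.  The names `my_cluster`,
// `p0` and `p1` below are hypothetical.
//
//     cluster my_cluster = { "example" };
//     processor p0 = { "worker-0", my_cluster };
//     processor p1 = { "worker-1", my_cluster };
//     // threads created on my_cluster are now scheduled across p0 and p1;
//     // both processors shut down when they go out of scope (^?{}).
//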
//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes used by the ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void  ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Aligned timestamps which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __timestamp_t {
    volatile unsigned long long tv;
};

static inline void  ?{}(__timestamp_t & this) { this.tv = 0; }
static inline void ^?{}(__timestamp_t & this) {}

// TODO: adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
    // Data tracking the actual lanes.
    // Kept on a separate cache line from the `used` struct, since `used` can change
    // on each push/pop while this data only changes on shrink/grow.
    struct {
        // Array of lanes
        __intrusive_lane_t * volatile data;

        // Array of timestamps
        __timestamp_t * volatile tscs;

        // Number of lanes (empty or not)
        volatile size_t count;
    } lanes;
};

void  ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);
#if !defined(__CFA_NO_STATISTICS__)
    unsigned cnt(const __ready_queue_t & this, unsigned idx);
#endif

// Idle Sleep
struct __cluster_proc_list {
    // Spin lock protecting the queue
    volatile uint64_t lock;

    // Total number of processors
    unsigned total;

    // Total number of idle processors
    unsigned idle;

    // List of idle processors
    dlist(processor) idles;

    // List of active processors
    dlist(processor) actives;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
    // Ready queue for threads
    __ready_queue_t ready_queue;

    // Name of the cluster
    const char * name;

    // Preemption rate on this cluster
    Duration preemption_rate;

    // List of processors (idle and active)
    __cluster_proc_list procs;

    // List of threads
    __spinlock_t thread_list_lock;
    __dllist_t(struct $thread) threads;
    unsigned int nthreads;

    // Linked-list fields
    struct __dbg_node_cltr {
        cluster * next;
        cluster * prev;
    } node;

    struct {
        $io_arbiter * arbiter;
        io_context_params params;
    } io;

    #if !defined(__CFA_NO_STATISTICS__)
        struct __stats_t * stats;
        int print_stats;
    #endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[]) { io_context_params default_params; this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io) { io_context_params default_params; this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params) { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params) { this{name, default_preemption(), num_io, io_params}; }

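// Usage sketch (illustrative only): the overloads above let callers supply any
// combination of name, preemption rate, number of I/O contexts and I/O
// parameters, with the rest defaulted.  The names `batch`, `net`, `full` and
// `big_io`, and the value 256, are hypothetical.
//
//     cluster batch = { "batch" };                                  // all defaults
//     io_context_params big_io;
//     big_io.num_entries = 256;
//     cluster net  = { "net", 4, big_io };                          // named, 4 I/O contexts, custom params
//     cluster full = { "full", default_preemption(), 2, big_io };   // every parameter explicit
//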
static inline [cluster *&, cluster *&] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

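// Usage sketch (illustrative only): user code can ask which processor and
// cluster it is currently running on; as flagged above, active_processor() is
// unsafe because the calling thread can migrate at any time.
//
//     cluster * cl = active_cluster();    // cluster of the currently executing thread
//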
#if !defined(__CFA_NO_STATISTICS__)
    void print_stats_now( cluster & this, int flags );

    static inline void print_stats_at_exit( cluster & this, int flags ) {
        this.print_stats |= flags;
    }

    static inline void print_stats_at_exit( processor & this, int flags ) {
        this.print_stats |= flags;
    }

    void print_halts( processor & this );
#endif
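
// Usage sketch (illustrative only): statistics printing is requested per
// cluster or per processor.  The flag bits are defined by the statistics
// module, not in this header; `my_cluster` and `my_flags` are hypothetical.
//
//     extern int my_flags;                          // bitmask of statistic groups
//     print_stats_at_exit( my_cluster, my_flags );  // print when the cluster is torn down
//     print_stats_now( my_cluster, my_flags );      // or print immediately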

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //