source: libcfa/src/concurrency/kernel.hfa @ 41ca6fa

Last change on this file since 41ca6fa was a1538cd, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

Implemented part of the same C api for threads as libfibre.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel -- Header containing the core of the kernel API
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
	#include <bits/pthreadtypes.h>
	#include <pthread.h>
	#include <linux/types.h>
}

#ifdef __CFA_WITH_VERIFY__
	extern bool __cfaabi_dbg_in_kernel();
#endif

//-----------------------------------------------------------------------------
// I/O
struct cluster;
struct $io_context;
struct $io_arbiter;

struct io_context_params {
	int num_entries;
};

void ?{}(io_context_params & this);

//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
	unsigned id:24;
	bool full_proc:1;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
	#endif
};

coroutine processorCtx_t {
	struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
	// Main state
	inline __processor_id_t;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Set to true to notify the processor that it should terminate
	volatile bool do_terminate;

	// Coroutine context that keeps the state of the processor
	struct processorCtx_t runner;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	struct {
		$io_context * ctx;
		bool pending;
		bool dirty;
	} io;

	// Preemption data
	// Node which is added to the discrete-event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore)
	int idle;

	// Termination synchronisation (user semaphore)
	oneshot terminated;

	// pthread stack
	void * stack;

	// Linked-list fields
	DLISTED_MGD_IMPL_IN(processor)

	// Special init fields, needed for memcached integration.
	// Once the memcached experiments are done this should probably be removed:
	// it is not a particularly safe scheme as it can make processors less homogeneous.
	struct {
		void (*fnc) (void *);
		void * arg;
	} init;

	#if !defined(__CFA_NO_STATISTICS__)
		int print_stats;
		bool print_halts;
	#endif

#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor
	const char * last_enable;
#endif
};

void ?{}(processor & this, const char name[], struct cluster & cltr, void (*init) (void *), void * arg);
void ^?{}(processor & this);

static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster, 0p, 0p }; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr, 0p, 0p }; }
static inline void ?{}(processor & this, const char name[]) { this{ name, *mainCluster, 0p, 0p }; }

DLISTED_MGD_IMPL_OUT(processor)
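
// Usage sketch (illustrative only, not part of the original header): in Cforall,
// simply declaring a processor object adds a kernel thread that schedules user
// threads from the chosen cluster for the lifetime of the object. The function
// and variable names below are hypothetical.
static inline void example_processor_usage() {
	processor extra;                   // anonymous processor, runs threads of mainCluster
	processor named = { "worker-1" };  // named processor, also on mainCluster
	// user threads created while these objects exist may run on any processor of
	// their cluster; the destructors, run at end of scope, join the kernel threads
}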

//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter used to track whether or not the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
	unsigned mask;
	int root;
	__snzi_node_t * nodes;
};

void ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );

// TODO: adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
	// Data tracking how many/which lanes are used
	// Aligned to 128 for cache locality
	__snzi_t snzi;

	// Data tracking the actual lanes
	// On a separate cache line from the used struct since
	// used can change on each push/pop but this data
	// only changes on shrink/grow
	struct {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;
};

void ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);

// Idle Sleep
struct __cluster_idles {
	// Spin lock protecting the queue
	volatile uint64_t lock;

	// Total number of processors
	unsigned total;

	// Total number of idle processors
	unsigned idle;

	// List of idle processors
	dlist(processor, processor) list;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
	// Ready queue for threads
	__ready_queue_t ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of idle processors
	__cluster_idles idles;

	// List of threads
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Linked-list fields
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	struct {
		$io_arbiter * arbiter;
		io_context_params params;
	} io;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
		int print_stats;
	#endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[]) { io_context_params default_params; this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io) { io_context_params default_params; this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params) { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params) { this{name, default_preemption(), num_io, io_params}; }
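
// Usage sketch (illustrative only, not part of the original header): creating a
// cluster with custom I/O parameters and attaching a processor to it. The names
// and the num_entries value below are arbitrary examples.
static inline void example_cluster_usage() {
	io_context_params params;                // default-constructed I/O parameters
	params.num_entries = 256;                // example ring size, arbitrary value
	cluster io_cluster = { "io", params };   // cluster using the custom I/O parameters
	processor io_proc = { io_cluster };      // kernel thread scheduling threads of io_cluster
	// io_proc is destroyed before io_cluster at end of scope, in reverse declaration order
}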

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
	void print_stats_now( cluster & this, int flags );

	static inline void print_stats_at_exit( cluster & this, int flags ) {
		this.print_stats |= flags;
	}

	static inline void print_stats_at_exit( processor & this, int flags ) {
		this.print_stats |= flags;
	}

	void print_halts( processor & this );
#endif
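
// Usage sketch (illustrative only, not part of the original header): requesting
// that scheduler statistics be printed when a cluster or processor is destroyed.
// Only meaningful in builds where __CFA_NO_STATISTICS__ is not defined; the flag
// value below is a placeholder, the real flag constants come from the statistics module.
static inline void example_stats_usage( cluster & clu, processor & proc ) {
	print_stats_at_exit( clu, 1 );    // mark the cluster to dump its statistics at destruction
	print_stats_at_exit( proc, 1 );   // same for an individual processor
}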

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //