source: libcfa/src/concurrency/kernel.hfa@ 0b18db7

Last change on this file since 0b18db7 was 58d64a4, checked in by Thierry Delisle <tdelisle@…>, 5 years ago

processor_id_t now has a bool to state if it is a full processor.
kernelTLS.this_thread now has a much tighter lifetime.
Remove a warning in kernel.cfa

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel --
//
// Author : Thierry Delisle
// Created On : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb 4 12:29:26 2020
// Update Count : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
	#include <bits/pthreadtypes.h>
	#include <linux/types.h>
}

//-----------------------------------------------------------------------------
// Locks
struct semaphore {
	__spinlock_t lock;
	int count;
	__queue_t($thread) waiting;
};

void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
bool P (semaphore & this);
bool V (semaphore & this);
bool V (semaphore & this, unsigned count);


//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
	unsigned id:24;
	bool full_proc:1;

	#if !defined(__CFA_NO_STATISTICS__)
		struct __stats_t * stats;
	#endif
};

coroutine processorCtx_t {
	struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
	// Main state
	inline __processor_id_t;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Set to true to notify the processor that it should terminate
	volatile bool do_terminate;

	// Coroutine ctx that keeps the state of the processor
	struct processorCtx_t runner;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// RunThread data
	// Action to do after a thread is run
	$thread * destroyer;

	// Preemption data
	// Node which is added in the discrete event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region; the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore)
	__bin_sem_t idle;

	// Termination synchronisation (user semaphore)
	semaphore terminated;

	// pthread stack
	void * stack;

	// Linked-list fields
	DLISTED_MGD_IMPL_IN(processor)

	#if !defined(__CFA_NO_STATISTICS__)
		int print_stats;
		bool print_halts;
	#endif

#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor
	const char * last_enable;
#endif
};

void ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

DLISTED_MGD_IMPL_OUT(processor)

//-----------------------------------------------------------------------------
// I/O
struct __io_data;

// IO poller user-thread
// Not using the "thread" keyword because we want to control
// more carefully when to start/stop it
struct $io_ctx_thread {
	struct __io_data * ring;
	single_sem sem;
	volatile bool done;
	$thread self;
};


struct io_context {
	$io_ctx_thread thrd;
};

struct io_context_params {
	int num_entries;
	int num_ready;
	int submit_aff;
	bool eager_submits:1;
	bool poller_submits:1;
	bool poll_submit:1;
	bool poll_complete:1;
};

void ?{}(io_context_params & this);

void ?{}(io_context & this, struct cluster & cl);
void ?{}(io_context & this, struct cluster & cl, const io_context_params & params);
void ^?{}(io_context & this);
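
// Usage sketch (illustrative, not part of this header): the default constructor
// fills io_context_params with default values; individual fields can then be
// overridden before the struct is passed to an io_context or cluster
// constructor. The value 256 and `someCluster' below are hypothetical.
//
//	io_context_params params;		// defaults via ?{}
//	params.num_entries = 256;		// set the number of ring entries
//	io_context ctx = { someCluster, params };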

struct io_cancellation {
	__u64 target;
};

static inline void ?{}(io_cancellation & this) { this.target = -1u; }
static inline void ^?{}(io_cancellation & this) {}
bool cancel(io_cancellation & this);

//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter used to track whether or not the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
	unsigned mask;
	int root;
	__snzi_node_t * nodes;
};

void ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );

//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
	// Data tracking how many/which lanes are used
	// Aligned to 128 for cache locality
	__snzi_t snzi;

	// Data tracking the actual lanes
	// On a separate cache line from the used struct since
	// used can change on each push/pop but this data
	// only changes on shrink/grow
	struct {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;
};

void ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);

[1eb239e4]210// Idle Sleep
211struct __cluster_idles {
212 // Spin lock protecting the queue
213 volatile uint64_t lock;
214
215 // Total number of processors
216 unsigned total;
217
218 // Total number of idle processors
219 unsigned idle;
220
221 // List of idle processors
222 dlist(processor, processor) list;
223};
224
[de94a60]225//-----------------------------------------------------------------------------
226// Cluster
[37ba662]227struct __attribute__((aligned(128))) cluster {
[de94a60]228 // Ready queue for threads
[b798713]229 __ready_queue_t ready_queue;
[de94a60]230
231 // Name of the cluster
232 const char * name;
233
234 // Preemption rate on this cluster
235 Duration preemption_rate;
236
[64a7146]237 // List of idle processors
[1eb239e4]238 __cluster_idles idles;
[de94a60]239
[d4e68a6]240 // List of threads
[a1a17a74]241 __spinlock_t thread_list_lock;
[ac2b598]242 __dllist_t(struct $thread) threads;
[d4e68a6]243 unsigned int nthreads;
[a1a17a74]244
[de94a60]245 // Link lists fields
[ea8b2f7]246 struct __dbg_node_cltr {
[de94a60]247 cluster * next;
248 cluster * prev;
249 } node;
[92976d9]250
[f00b26d4]251 struct {
252 io_context * ctxs;
253 unsigned cnt;
254 } io;
[038be32]255
256 #if !defined(__CFA_NO_STATISTICS__)
[8834751]257 struct __stats_t * stats;
[69fbc61]258 int print_stats;
[038be32]259 #endif
[de94a60]260};
261extern Duration default_preemption();
262
[f00b26d4]263void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
[de94a60]264void ^?{}(cluster & this);
265
[f00b26d4]266static inline void ?{} (cluster & this) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
267static inline void ?{} (cluster & this, Duration preemption_rate) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
268static inline void ?{} (cluster & this, const char name[]) { io_context_params default_params; this{name, default_preemption(), 1, default_params}; }
269static inline void ?{} (cluster & this, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
270static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params; this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
271static inline void ?{} (cluster & this, const char name[], unsigned num_io) { io_context_params default_params; this{name, default_preemption(), num_io, default_params}; }
272static inline void ?{} (cluster & this, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
273static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
274static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params) { this{name, default_preemption(), 1, io_params}; }
275static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
276static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
277static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params) { this{name, default_preemption(), num_io, io_params}; }
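
// Usage sketch (illustrative, not part of this header): the overloads above only
// fill in defaults (name "Anonymous Cluster", default_preemption(), a single
// io_context, default io_context_params) before forwarding to the main
// constructor, so a typical named cluster with one extra kernel thread is:
//
//	cluster workers = { "Workers" };	// default preemption rate, one io_context
//	processor p = { "worker-0", workers };	// kernel thread scheduling threads on workers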

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
static inline struct cluster * active_cluster () { return TL_GET( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
	static inline void print_stats_at_exit( cluster & this, int flags ) {
		this.print_stats |= flags;
	}

	static inline void print_stats_at_exit( processor & this, int flags ) {
		this.print_stats |= flags;
	}

	void print_halts( processor & this );
#endif
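
// Usage sketch (illustrative, not part of this header): when statistics are
// compiled in, or-ing flag bits into print_stats asks a cluster or processor
// to dump its counters when it shuts down; `workers' and `STATS_FLAGS' below
// are hypothetical.
//
//	#if !defined(__CFA_NO_STATISTICS__)
//		print_stats_at_exit( workers, STATS_FLAGS );
//	#endif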

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //