source: libcfa/src/concurrency/kernel.hfa@ 8c60d59

ADT arm-eh ast-experimental enum forall-pointer-decay jacob/cs343-translation new-ast new-ast-unique-expr pthread-emulation qualifiedEnum
Last change on this file since 8c60d59 was 2f1cb37, checked in by Thierry Delisle <tdelisle@…>, 6 years ago

Merge branch 'master' into relaxed_ready

  • Property mode set to 100644
File size: 9.1 KB
RevLine 
[8118303]1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
[75a17f1]7// kernel --
[8118303]8//
9// Author : Thierry Delisle
[75f3522]10// Created On : Tue Jan 17 12:27:26 2017
[6b0b624]11// Last Modified By : Peter A. Buhr
[e3fea42]12// Last Modified On : Tue Feb 4 12:29:26 2020
13// Update Count : 22
[8118303]14//
15
[6b0b624]16#pragma once
[8118303]17
[c84e80a]18#include <stdbool.h>
[92976d9]19#include <stdint.h>
[8118303]20
[bd98b58]21#include "invoke.h"
[73abe95]22#include "time_t.hfa"
[d76bd79]23#include "coroutine.hfa"
[bd98b58]24
[8def349]25extern "C" {
26#include <pthread.h>
[6b4cdd3]27#include <semaphore.h>
[8def349]28}
29
[db6f06a]30//-----------------------------------------------------------------------------
31// Locks
[bdeba0b]32struct semaphore {
[ea7d2b0]33 __spinlock_t lock;
[bdeba0b]34 int count;
[ac2b598]35 __queue_t($thread) waiting;
[9c31349]36};
37
// Construct with an initial count (default 1, i.e. binary-semaphore/mutex-like use)
void ?{}(semaphore & this, int count = 1);
void ^?{}(semaphore & this);
// P/V return bool — NOTE(review): return semantics (presumably "did block"/"did wake someone") defined in kernel.cfa; confirm
bool P (semaphore & this);
bool V (semaphore & this);
// Release count permits at once
bool V (semaphore & this, unsigned count);
[9c31349]43
[db6f06a]44
[bd98b58]45//-----------------------------------------------------------------------------
[de94a60]46// Processor
// Default cluster that anonymous processors attach to (see inline processor constructors below)
extern struct cluster * mainCluster;

// Coroutine serving as the execution context of a processor (kernel thread)
coroutine processorCtx_t {
	// back-pointer to the processor this context runs on
	struct processor * proc;
};
53
// Wrapper around kernel threads
struct processor {
	// Main state
	// Coroutine ctx which keeps the state of the processor
	struct processorCtx_t runner;

	// Cluster from which to get threads
	struct cluster * cltr;

	// Processor id — NOTE(review): presumably an index into the cluster's ready-queue RW-lock cells; confirm
	unsigned int id;

	// Name of the processor
	const char * name;

	// Handle to pthreads
	pthread_t kernel_thread;

	// RunThread data
	// Action to do after a thread is ran:
	// thread to destroy once it has fully switched away, if any
	$thread * destroyer;

	// Preemption data
	// Node which is added in the discrete event simulation
	struct alarm_node_t * preemption_alarm;

	// If true, a preemption was triggered in an unsafe region, the processor must preempt as soon as possible
	bool pending_preemption;

	// Idle lock (kernel semaphore) — blocks the kernel thread when there is no work
	__bin_sem_t idle;

	// Termination
	// Set to true to notify the processor it should terminate
	volatile bool do_terminate;

	// Termination synchronisation (user semaphore)
	semaphore terminated;

	// pthread stack, owned by this processor
	void * stack;

	// Intrusive link fields for the cluster's processor lists (accessed via __get below)
	struct __dbg_node_cltr {
		processor * next;
		processor * prev;
	} node;

#ifdef __CFA_DEBUG__
	// Last function to enable preemption on this processor (debugging aid)
	const char * last_enable;
#endif
};
105
// Construct a processor named `name` attached to cluster `cltr`
void ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

// Convenience constructors delegating to the main constructor above
static inline void ?{}(processor & this) { this{ "Anonymous Processor", *mainCluster}; }
static inline void ?{}(processor & this, struct cluster & cltr) { this{ "Anonymous Processor", cltr}; }
static inline void ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }

// Intrusive-list accessor required by __dllist_t: returns the [next, prev] link tuple
static inline [processor *&, processor *& ] __get( processor & this ) __attribute__((const)) { return this.node.[next, prev]; }
[de94a60]114
[92976d9]115//-----------------------------------------------------------------------------
116// I/O
// Opaque per-cluster I/O state; definition lives outside this header
struct __io_data;

// Cluster-creation flag bits controlling the I/O poller.
// Bodies are parenthesized so the macros compose safely inside larger
// expressions (unparenthesized `1 << 1` mis-binds next to higher-precedence
// operators, e.g. `x % FLAG`).
#define CFA_CLUSTER_IO_POLLER_USER_THREAD    (1 << 0) // 0x1 — poll from a user-level thread
#define CFA_CLUSTER_IO_POLLER_THREAD_SUBMITS (1 << 1) // 0x2 — poller thread also submits
// #define CFA_CLUSTER_IO_POLLER_KERNEL_SIDE (1 << 2) // 0x4
// Bit offset of the buffer-length field packed into the flags word
#define CFA_CLUSTER_IO_BUFFLEN_OFFSET        16
[de94a60]123
[7768b8d]124
125//-----------------------------------------------------------------------------
126// Cluster Tools
[dca5802]127
// Cells used by the reader-writer lock;
// while not generic, it only relies on an opaque pointer
struct __processor_id;

// Reader-writer lock protecting the ready-queue.
// While this lock is mostly generic, some aspects
// have been hard-coded for the ready-queue for
// simplicity and performance.
struct __clusterRWLock_t {
	// total cachelines allocated
	unsigned int max;

	// cachelines currently in use
	volatile unsigned int alloc;

	// cachelines ready to iterate over
	// (!= to alloc when a thread is in the second half of doregister)
	volatile unsigned int ready;

	// writer lock
	volatile bool lock;

	// data pointer (one cell per registered processor)
	__processor_id * data;
};

void ?{}(__clusterRWLock_t & this);
void ^?{}(__clusterRWLock_t & this);
156
// Intrusive lanes which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t {
	// spin lock protecting the queue
	volatile bool lock;

	// anchor for the head and the tail of the queue
	struct __sentinel_t {
		// Link-list fields:
		// intrusive link field for threads;
		// must be laid out exactly as in $thread
		__thread_desc_link link;
	} before, after;

#if defined(__CFA_WITH_VERIFY__)
	// id of last processor to acquire the lock;
	// needed only to check for mutual exclusion violations
	unsigned int last_id;

	// number of items on this list;
	// needed only to check for deadlocks
	unsigned int count;
#endif

	// Optional statistic counters
	#if !defined(__CFA_NO_SCHED_STATS__)
		struct __attribute__((aligned(64))) {
			// difference between number of pushes and pops
			ssize_t diff;

			// total number of pushes and pops
			size_t push;
			size_t pop ;
		} stat;
	#endif
};

void ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);
[7768b8d]195
// Bitmask word type used to track which ready-queue lanes are non-empty
typedef unsigned long long __cfa_readyQ_mask_t;

// enum {
// __cfa_ready_queue_mask_size = (64 - sizeof(size_t)) / sizeof(size_t),
// __cfa_max_ready_queues = __cfa_ready_queue_mask_size * 8 * sizeof(size_t)
// };

// Number of mask words — NOTE(review): formula suggests the mask fills the
// remainder of a 64-byte cacheline after a size_t count; matches the layout
// of __ready_queue_t.used below — confirm
#define __cfa_lane_mask_size ((64 - sizeof(size_t)) / sizeof(__cfa_readyQ_mask_t))
// Maximum number of lanes representable: words * bits-per-word
#define __cfa_max_lanes (__cfa_lane_mask_size * 8 * sizeof(__cfa_readyQ_mask_t))
[b798713]205
//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __attribute__((aligned(128))) __ready_queue_t {
	// Data tracking how many/which lanes are used.
	// Aligned to 128 for cache locality.
	struct {
		// number of non-empty lanes
		volatile size_t count;

		// bit mask, set bits identify which lanes are non-empty
		volatile __cfa_readyQ_mask_t mask[ __cfa_lane_mask_size ];
	} used;

	// Data tracking the actual lanes.
	// On a separate cacheline from the used struct since
	// used can change on each push/pop but this data
	// only changes on shrink/grow.
	struct __attribute__((aligned(64))) {
		// Array of lanes
		__intrusive_lane_t * volatile data;

		// Number of lanes (empty or not)
		volatile size_t count;
	} lanes;

	// Statistics
	#if !defined(__CFA_NO_STATISTICS__)
		__attribute__((aligned(64))) struct {
			struct {
				// Push statistics
				struct {
					// number of attempts at pushing something
					volatile size_t attempt;

					// number of successes at pushing
					volatile size_t success;
				} push;

				// Pop statistics
				struct {
					// number of reads of the mask;
					// picking an empty __cfa_readyQ_mask_t counts here
					// but not as an attempt
					volatile size_t maskrds;

					// number of attempts at popping something
					volatile size_t attempt;

					// number of successes at popping
					volatile size_t success;
				} pop;
			} pick;

			// stats on the "used" struct of the queue;
			// tracks average number of queues that are not empty
			// when pushing / popping
			struct {
				volatile size_t value;
				volatile size_t count;
			} used;
		} global_stats;

	#endif
};

void ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);
273
//-----------------------------------------------------------------------------
// Cluster: a scheduling domain grouping processors and the threads they run
struct cluster {
	// Ready queue locks
	__clusterRWLock_t ready_lock;

	// Ready queue for threads
	__ready_queue_t ready_queue;

	// Name of the cluster
	const char * name;

	// Preemption rate on this cluster
	Duration preemption_rate;

	// List of processors
	__spinlock_t idle_lock;
	__dllist_t(struct processor) procs;
	__dllist_t(struct processor) idles;
	unsigned int nprocessors;

	// List of threads
	__spinlock_t thread_list_lock;
	__dllist_t(struct $thread) threads;
	unsigned int nthreads;

	// Intrusive link fields for the global cluster list (accessed via __get below)
	struct __dbg_node_cltr {
		cluster * next;
		cluster * prev;
	} node;

	// Opaque per-cluster I/O state
	struct __io_data * io;

	#if !defined(__CFA_NO_STATISTICS__)
		// when true, scheduler statistics are printed (see print_stats_at_exit below)
		bool print_stats;
	#endif
};
// Default preemption rate for new clusters — NOTE(review): defined elsewhere; confirm in kernel startup code
extern Duration default_preemption();

// Construct a cluster with the given name, preemption rate and CFA_CLUSTER_IO_* flags
void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned flags);
void ^?{}(cluster & this);

// Convenience constructors delegating to the main constructor above
static inline void ?{} (cluster & this) { this{"Anonymous Cluster", default_preemption(), 0}; }
static inline void ?{} (cluster & this, Duration preemption_rate) { this{"Anonymous Cluster", preemption_rate, 0}; }
static inline void ?{} (cluster & this, const char name[]) { this{name, default_preemption(), 0}; }
static inline void ?{} (cluster & this, unsigned flags) { this{"Anonymous Cluster", default_preemption(), flags}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned flags) { this{"Anonymous Cluster", preemption_rate, flags}; }
static inline void ?{} (cluster & this, const char name[], unsigned flags) { this{name, default_preemption(), flags}; }
[de94a60]323
// Intrusive-list accessor required by __dllist_t: returns the [next, prev] link tuple
static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

// Processor/cluster of the calling kernel thread, read from thread-local state.
// UNSAFE: the value may be stale as soon as it is returned (e.g. after migration)
static inline struct processor * active_processor() { return TL_GET( this_processor ); } // UNSAFE
static inline struct cluster * active_cluster () { return TL_GET( this_processor )->cltr; }
328
#if !defined(__CFA_NO_STATISTICS__)
	// Request that this cluster prints its scheduler statistics
	// (sets the flag read wherever print_stats is consumed — presumably at cluster teardown; confirm)
	static inline void print_stats_at_exit( cluster & this ) {
		this.print_stats = true;
	}
#endif
334
[8118303]335// Local Variables: //
[6b0b624]336// mode: c //
337// tab-width: 4 //
[8118303]338// End: //
Note: See TracBrowser for help on using the repository browser.