source: libcfa/src/concurrency/kernel.hfa @ a80db97

Last change on this file since a80db97 was 78da4ab, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

New implementation of io based on instance burrowing.
Trying to avoid the unbounded growth of the previous flat combining approach.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel -- Header containing the core of the kernel API
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Feb  4 12:29:26 2020
// Update Count     : 22
//

#pragma once

#include "invoke.h"
#include "time_t.hfa"
#include "coroutine.hfa"

#include "containers/list.hfa"

extern "C" {
        #include <bits/pthreadtypes.h>
        #include <pthread.h>
        #include <linux/types.h>
}

//-----------------------------------------------------------------------------
// Underlying Locks
#ifdef __CFA_WITH_VERIFY__
        extern bool __cfaabi_dbg_in_kernel();
#endif

struct __bin_sem_t {
        pthread_mutex_t         lock;
        pthread_cond_t          cond;
        int                     val;
};
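
// Illustrative sketch (not part of this header; the real operations live in the
// kernel implementation): __bin_sem_t acts as a binary semaphore built from the
// pthread mutex/condition pair above. Assumed wait/post logic:
//
//     static void wait( __bin_sem_t & this ) {
//         pthread_mutex_lock( &this.lock );
//         while( this.val < 1 ) pthread_cond_wait( &this.cond, &this.lock );
//         this.val = 0;                              // consume the token
//         pthread_mutex_unlock( &this.lock );
//     }
//
//     static void post( __bin_sem_t & this ) {
//         pthread_mutex_lock( &this.lock );
//         bool needs_signal = this.val < 1;
//         this.val = 1;                              // publish the token
//         pthread_mutex_unlock( &this.lock );
//         if( needs_signal ) pthread_cond_signal( &this.cond );
//     }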

//-----------------------------------------------------------------------------
// I/O
struct cluster;
struct $io_context;
struct $io_arbiter;

struct io_context_params {
        int num_entries;
};

void  ?{}(io_context_params & this);

struct io_context {
        $io_context * ctx;
        cluster * cltr;
};
void  ?{}(io_context & this, struct cluster & cl);
void ^?{}(io_context & this);
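
// Usage sketch (illustrative, not part of the original header): an io_context is
// bound to one cluster for its whole lifetime, so it is constructed from an
// existing cluster and destroyed before that cluster is.
//
//     cluster cl = { "io-cluster" };  // named cluster, default settings
//     io_context io = { cl };         // calls ?{}(io_context &, cluster &)
//     // ... perform io through the cluster's runtime ...
//     // both objects are destroyed automatically in reverse declaration order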

//-----------------------------------------------------------------------------
// Processor
extern struct cluster * mainCluster;

// Processor id, required for scheduling threads
struct __processor_id_t {
        unsigned id:24;
        bool full_proc:1;

        #if !defined(__CFA_NO_STATISTICS__)
                struct __stats_t * stats;
        #endif
};

coroutine processorCtx_t {
        struct processor * proc;
};

// Wrapper around kernel threads
struct __attribute__((aligned(128))) processor {
        // Main state
        inline __processor_id_t;

        // Cluster from which to get threads
        struct cluster * cltr;

        // Set to true to notify the processor that it should terminate
        volatile bool do_terminate;

        // Coroutine ctx that keeps the state of the processor
        struct processorCtx_t runner;

        // Name of the processor
        const char * name;

        // Handle to pthreads
        pthread_t kernel_thread;

        // Private io state for this processor
        struct {
                $io_context * volatile ctx;
                volatile bool lock;
        } io;

        // Preemption data
        // Node which is added to the discrete event simulation
        struct alarm_node_t * preemption_alarm;

        // If true, a preemption was triggered in an unsafe region and the processor must preempt as soon as possible
        bool pending_preemption;

        // Idle lock (kernel semaphore)
        __bin_sem_t idle;

        // Termination synchronisation (user semaphore)
        oneshot terminated;

        // pthread stack
        void * stack;

        // Linked-list fields
        DLISTED_MGD_IMPL_IN(processor)

        #if !defined(__CFA_NO_STATISTICS__)
                int print_stats;
                bool print_halts;
        #endif

#ifdef __CFA_DEBUG__
        // Last function to enable preemption on this processor
        const char * last_enable;
#endif
};

void  ?{}(processor & this, const char name[], struct cluster & cltr);
void ^?{}(processor & this);

static inline void  ?{}(processor & this)                    { this{ "Anonymous Processor", *mainCluster}; }
static inline void  ?{}(processor & this, struct cluster & cltr)    { this{ "Anonymous Processor", cltr}; }
static inline void  ?{}(processor & this, const char name[]) { this{name, *mainCluster }; }
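
// Usage sketch (illustrative, not part of the original header): a processor is a
// kernel thread that runs user threads from one cluster; the overloads above only
// fill in a default name and/or the main cluster.
//
//     cluster cl = { "worker-cluster" };
//     processor p0 = { "worker-0", cl };    // named processor bound to cl
//     processor p1;                         // anonymous processor on mainCluster
//     // each processor schedules user threads from its cluster from construction
//     // until it is destroyed at scope exit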

DLISTED_MGD_IMPL_OUT(processor)

//-----------------------------------------------------------------------------
// Cluster Tools

// Intrusive lanes which are used by the relaxed ready queue
struct __attribute__((aligned(128))) __intrusive_lane_t;
void  ?{}(__intrusive_lane_t & this);
void ^?{}(__intrusive_lane_t & this);

// Counter used to track whether or not the lanes are all empty
struct __attribute__((aligned(128))) __snzi_node_t;
struct __snzi_t {
        unsigned mask;
        int root;
        __snzi_node_t * nodes;
};

void  ?{}( __snzi_t & this, unsigned depth );
void ^?{}( __snzi_t & this );
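
// Note (added, not in the original header): "snzi" presumably stands for Scalable
// Non-Zero Indicator: a small tree of counters whose root answers the single
// question "is at least one lane non-empty?" without every processor contending
// on one shared counter. Its arrive/depart/query operations belong to the
// ready-queue implementation rather than to this header.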

//TODO adjust cache size to ARCHITECTURE
// Structure holding the relaxed ready queue
struct __ready_queue_t {
        // Data tracking how many/which lanes are used
        // Aligned to 128 for cache locality
        __snzi_t snzi;

        // Data tracking the actual lanes
        // On a separate cacheline from the used struct since
        // used can change on each push/pop but this data
        // only changes on shrink/grow
        struct {
                // Array of lanes
                __intrusive_lane_t * volatile data;

                // Number of lanes (empty or not)
                volatile size_t count;
        } lanes;
};

void  ?{}(__ready_queue_t & this);
void ^?{}(__ready_queue_t & this);
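
// Note (added, not in the original header): only the layout is exposed here so
// that cluster can embed the ready queue by value; the push/pop operations over
// the lanes, and the snzi updates that track which lanes are non-empty, live in
// the ready-queue implementation.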

// Idle Sleep
struct __cluster_idles {
        // Spin lock protecting the queue
        volatile uint64_t lock;

        // Total number of processors
        unsigned total;

        // Total number of idle processors
        unsigned idle;

        // List of idle processors
        dlist(processor, processor) list;
};

//-----------------------------------------------------------------------------
// Cluster
struct __attribute__((aligned(128))) cluster {
        // Ready queue for threads
        __ready_queue_t ready_queue;

        // Name of the cluster
        const char * name;

        // Preemption rate on this cluster
        Duration preemption_rate;

        // List of idle processors
        __cluster_idles idles;

        // List of threads
        __spinlock_t thread_list_lock;
        __dllist_t(struct $thread) threads;
        unsigned int nthreads;

        // Linked-list fields
        struct __dbg_node_cltr {
                cluster * next;
                cluster * prev;
        } node;

        // io subsystem state for this cluster
        struct {
                $io_arbiter * arbiter;
                io_context_params params;
        } io;

        #if !defined(__CFA_NO_STATISTICS__)
                struct __stats_t * stats;
                int print_stats;
        #endif
};
extern Duration default_preemption();

void ?{} (cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params);
void ^?{}(cluster & this);

static inline void ?{} (cluster & this)                                            { io_context_params default_params;    this{"Anonymous Cluster", default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate)                  { io_context_params default_params;    this{"Anonymous Cluster", preemption_rate, 1, default_params}; }
static inline void ?{} (cluster & this, const char name[])                         { io_context_params default_params;    this{name, default_preemption(), 1, default_params}; }
static inline void ?{} (cluster & this, unsigned num_io)                           { io_context_params default_params;    this{"Anonymous Cluster", default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io) { io_context_params default_params;    this{"Anonymous Cluster", preemption_rate, num_io, default_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io)        { io_context_params default_params;    this{name, default_preemption(), num_io, default_params}; }
static inline void ?{} (cluster & this, const io_context_params & io_params)                                            { this{"Anonymous Cluster", default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, const io_context_params & io_params)                  { this{"Anonymous Cluster", preemption_rate, 1, io_params}; }
static inline void ?{} (cluster & this, const char name[], const io_context_params & io_params)                         { this{name, default_preemption(), 1, io_params}; }
static inline void ?{} (cluster & this, unsigned num_io, const io_context_params & io_params)                           { this{"Anonymous Cluster", default_preemption(), num_io, io_params}; }
static inline void ?{} (cluster & this, Duration preemption_rate, unsigned num_io, const io_context_params & io_params) { this{"Anonymous Cluster", preemption_rate, num_io, io_params}; }
static inline void ?{} (cluster & this, const char name[], unsigned num_io, const io_context_params & io_params)        { this{name, default_preemption(), num_io, io_params}; }
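
// Usage sketch (illustrative, not part of the original header): every overload
// above forwards to the four-argument constructor, filling in a default name,
// preemption rate, number of io contexts and/or io parameters.
//
//     cluster cl0;                          // "Anonymous Cluster", all defaults
//     cluster cl1 = { "render" };           // named, default preemption, 1 io context
//     cluster cl2 = { "io-heavy", 4 };      // named, 4 io contexts
//     io_context_params params;             // default-constructed io parameters
//     cluster cl3 = { "disk", 2, params };  // named, 2 io contexts, explicit io params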

static inline [cluster *&, cluster *& ] __get( cluster & this ) __attribute__((const)) { return this.node.[next, prev]; }

static inline struct processor * active_processor() { return publicTLS_get( this_processor ); } // UNSAFE
static inline struct cluster   * active_cluster  () { return publicTLS_get( this_processor )->cltr; }

#if !defined(__CFA_NO_STATISTICS__)
        void print_stats_now( cluster & this, int flags );

        static inline void print_stats_at_exit( cluster & this, int flags ) {
                this.print_stats |= flags;
        }

        static inline void print_stats_at_exit( processor & this, int flags ) {
                this.print_stats |= flags;
        }

        void print_halts( processor & this );
#endif
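
// Usage sketch (illustrative, not part of the original header; the flag constants
// are defined by the statistics module, not here):
//
//     cluster cl = { "stats-demo" };
//     int flags = 0;                         // assumed: bitmask of statistics categories
//     print_stats_at_exit( cl, flags );      // accumulate flags, reported at teardown
//     print_stats_now( cl, flags );          // report immediately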

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //