source: libcfa/src/concurrency/kernel_private.hfa @ 665edf40

Last change on this file since 665edf40 was 431cd4f, checked in by Thierry Delisle <tdelisle@…>, 4 years ago

Added alternative to relaxed-fifo scheduler.
Disabled by default

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

//-----------------------------------------------------------------------------
// Scheduler

struct __attribute__((aligned(128))) __scheduler_lock_id_t;

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts_noPoll();
	void enable_interrupts( __cfaabi_dbg_ctx_param );
}

void __schedule_thread( $thread * )
#if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
	__attribute__((nonnull (1)))
#endif
;

extern bool __preemption_enabled();

// release/wake-up the following resources
void __thread_finish( $thread * thrd );

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );



extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
	extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block

//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct $thread & thrd );
void unregister( struct cluster * cltr, struct $thread & thrd );

//-----------------------------------------------------------------------------
// I/O
$io_arbiter * create(void);
void destroy($io_arbiter *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
void register_proc_id( struct __processor_id_t * );

// Unregister a processor from a given cluster using its id, getting back the original pointer
void unregister_proc_id( struct __processor_id_t * proc );

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			Pause();
	}
	/* paranoid */ verify(*ll);
}

// Non-Blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
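
// Illustrative usage sketch (added in editing, hypothetical name, not part of
// the original header): how the three spinlock primitives above are typically
// paired around a critical section.
static inline void __example_spin_section( volatile bool * flag ) {
	if( !__atomic_try_acquire( flag ) )     // cheap non-blocking attempt first
		__atomic_acquire( flag );           // otherwise spin until the flag is free
	/* ... critical section protected by *flag ... */
	__atomic_unlock( flag );                // release store publishes prior writes
}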

// Cells used by the reader-writer lock
// while not generic, it only relies on an opaque pointer
struct __attribute__((aligned(128))) __scheduler_lock_id_t {
	// Spin lock used as the underlying lock
	volatile bool lock;

	// Handle pointing to the proc owning this cell
	// Used for allocating cells and debugging
	__processor_id_t * volatile handle;

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		bool owned;
	#endif
};

static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));

//-----------------------------------------------------------------------
// Reader-Writer lock protecting the ready-queues
// while this lock is mostly generic, some aspects
// have been hard-coded for the ready-queue for
// simplicity and performance
struct __scheduler_RWLock_t {
	// total cachelines allocated
	unsigned int max;

	// cachelines currently in use
	volatile unsigned int alloc;

	// cachelines ready to iterate over
	// (!= to alloc when thread is in second half of doregister)
	volatile unsigned int ready;

	// writer lock
	volatile bool lock;

	// data pointer
	__scheduler_lock_id_t * data;
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t * __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( kernelTLS().this_proc_id );

	unsigned iproc = kernelTLS().this_proc_id->id;
	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		Pause();

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		data[iproc].owned = true;
	#endif
}

static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( kernelTLS().this_proc_id );

	unsigned iproc = kernelTLS().this_proc_id->id;
	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	/*paranoid*/ verify(data[iproc].owned);
	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		data[iproc].owned = false;
	#endif
	__atomic_unlock(&data[iproc].lock);
}
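
// Illustrative usage sketch (added in editing, hypothetical name): the reader
// side above brackets any use of the ready queue for scheduling; preemption
// must already be disabled when these are called.
static inline void __example_reader_section(void) {
	ready_schedule_lock();          // wait out any writer, then take this proc's cell
	/* ... scheduling work that only reads the ready-queue structure ... */
	ready_schedule_unlock();        // release this proc's cell
}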

#ifdef __CFA_WITH_VERIFY__
	static inline bool ready_schedule_islocked(void) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/*paranoid*/ verify( kernelTLS().this_proc_id );
		__processor_id_t * proc = kernelTLS().this_proc_id;
		return __scheduler_lock->data[proc->id].owned;
	}

	static inline bool ready_mutate_islocked() {
		return __scheduler_lock->lock;
	}
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );

//-----------------------------------------------------------------------
// Lock-Free registering/unregistering of threads
// Register a processor to a given cluster and get its unique id in return
// For convenience, also acquires the lock
static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
	register_proc_id( proc );
	return ready_mutate_lock();
}

// Unregister a processor from a given cluster using its id, getting back the original pointer
// assumes the lock is acquired
static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
	ready_mutate_unlock( last_s );
	unregister_proc_id( proc );
}
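
// Illustrative usage sketch (added in editing, hypothetical name): the two
// helpers above bundle processor registration with the writer lock, so
// structural changes to the ready queue can be made in between.
static inline void __example_writer_section( struct __processor_id_t * proc ) {
	uint_fast32_t last = ready_mutate_register( proc );    // register + writer acquire
	/* ... grow/shrink or otherwise restructure the ready queue ... */
	ready_mutate_unregister( proc, last );                  // writer release + unregister
}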

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	// Simple counting lock, acquired by incrementing the counter
	// to an odd number
	for() {
		uint64_t l = this.lock;
		if(
			(0 == (l % 2))
			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
		) return;
		Pause();
	}

	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void unlock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	/* paranoid */ verify( 1 == (this.lock % 2) );
	// Simple counting lock, released by incrementing to an even number
	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );

	// Release the global lock, which we acquired when locking
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
}
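
// Illustrative usage sketch (added in editing, hypothetical name): the counting
// lock above guards a cluster's idle-processor list; odd counter values mean
// held, even values mean free.
static inline void __example_idle_section( __cluster_proc_list & list ) {
	lock( list );       // also takes the global RW lock in read mode
	/* ... add or remove idle processors for the cluster ... */
	unlock( list );     // bumps the counter back to an even value
}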

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
// returns true if the list was previously empty, false otherwise
__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd);

//-----------------------------------------------------------------------
// pop thread from the ready queue of a cluster
// returns 0p if empty
// May return 0p spuriously
__attribute__((hot)) struct $thread * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from the ready queue of a cluster
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr);
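
// Illustrative usage sketch (added in editing, hypothetical name): a scheduler
// would normally try the fast path first and only fall back to the slow path,
// which is guaranteed to find any thread pushed before the call.
static inline struct $thread * __example_pop_any( struct cluster * cltr ) {
	struct $thread * thrd = pop_fast( cltr );   // may spuriously return 0p
	if( 0p == thrd ) thrd = pop_slow( cltr );   // exhaustive fallback
	return thrd;
}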

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);


// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //