//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Wed Aug 12 08:21:33 2020
// Update Count     : 9
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"
#include "stats.hfa"

//-----------------------------------------------------------------------------
// Scheduler

struct __attribute__((aligned(128))) __scheduler_lock_id_t;

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts( bool poll = true );
}
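
// Illustrative pairing (a sketch, not part of this header): interrupts are
// disabled/enabled in matching pairs around scheduler-internal critical
// sections; the `poll` argument controls whether enable_interrupts checks for
// pending preemption on release. `critical_section` is a hypothetical
// placeholder.
//
//   disable_interrupts();
//   critical_section();            // cannot be preempted here
//   enable_interrupts( true );     // poll for a deferred preemption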

void __schedule_thread( $thread * )
#if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
	__attribute__((nonnull (1)))
#endif
;

extern bool __preemption_enabled();

// Release/wake-up the resources held by the terminating thread
void __thread_finish( $thread * thrd );

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );
void __destroy_pthread( pthread_t pthread, void * stack, void ** retval );

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
	extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
)

#define TICKET_BLOCKED (-1) // thread is blocked
#define TICKET_RUNNING ( 0) // thread is running
#define TICKET_UNBLOCK ( 1) // thread should ignore next block

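// Illustrative sketch (not this header's implementation): the three ticket
// values above can resolve the race between a thread blocking and a
// concurrent wake-up with a single atomic swap. `ticket_park` and
// `really_block` are hypothetical names.
//
//   static void ticket_park( volatile int * ticket ) {
//       // advertise the intent to block
//       int old = __atomic_exchange_n( ticket, TICKET_BLOCKED, __ATOMIC_SEQ_CST );
//       if( old == TICKET_UNBLOCK ) {
//           // a wake-up already happened: ignore this block and keep running
//           __atomic_store_n( ticket, TICKET_RUNNING, __ATOMIC_SEQ_CST );
//           return;
//       }
//       really_block();
//   }
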
//-----------------------------------------------------------------------------
// Utils
void doregister( struct cluster * cltr, struct $thread & thrd );
void unregister( struct cluster * cltr, struct $thread & thrd );

//-----------------------------------------------------------------------------
// I/O
$io_arbiter * create(void);
void destroy($io_arbiter *);

//=======================================================================
// Cluster lock API
//=======================================================================
// Lock-free registering/unregistering of processors
// Register a processor to a given cluster and get its unique id in return
void register_proc_id( struct __processor_id_t * );

// Unregister a processor from a given cluster using its id, getting back the original pointer
void unregister_proc_id( struct __processor_id_t * proc );

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
// i.e., threads can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire: test-and-test-and-set, spinning on a relaxed load
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			Pause();
	}
	/* paranoid */ verify(*ll);
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
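
// Illustrative usage (a sketch; `flag` is a hypothetical lock variable):
//
//   static volatile bool flag = false;
//   if( !__atomic_try_acquire( &flag ) )   // opportunistic attempt first
//       __atomic_acquire( &flag );         // otherwise spin until acquired
//   /* ... critical section ... */
//   __atomic_unlock( &flag );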

// Cells used by the reader-writer lock
// While not generic, it only relies on an opaque pointer
struct __attribute__((aligned(128))) __scheduler_lock_id_t {
	// Spin lock used as the underlying lock
	volatile bool lock;

	// Handle pointing to the proc owning this cell
	// Used for allocating cells and debugging
	__processor_id_t * volatile handle;

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		bool owned;
	#endif
};

static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));

//-----------------------------------------------------------------------
// Reader-writer lock protecting the ready-queues
// While this lock is mostly generic, some aspects have been hard-coded
// for the ready-queue, for simplicity and performance
struct __scheduler_RWLock_t {
	// total cachelines allocated
	unsigned int max;

	// cachelines currently in use
	volatile unsigned int alloc;

	// cachelines ready to iterate over
	// (!= to alloc when thread is in second half of doregister)
	volatile unsigned int ready;

	// writer lock
	volatile bool lock;

	// data pointer
	__scheduler_lock_id_t * data;
};

void ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t * __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( kernelTLS().this_proc_id );

	unsigned iproc = kernelTLS().this_proc_id->id;
	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		Pause();

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);

	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		data[iproc].owned = true;
	#endif
}

static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
	/* paranoid */ verify( ! __preemption_enabled() );
	/* paranoid */ verify( kernelTLS().this_proc_id );

	unsigned iproc = kernelTLS().this_proc_id->id;
	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	/*paranoid*/ verify(data[iproc].owned);
	#ifdef __CFA_WITH_VERIFY__
		// Debug, check if this is owned for reading
		data[iproc].owned = false;
	#endif
	__atomic_unlock(&data[iproc].lock);
}
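
// Illustrative reader-side usage (a sketch; `scan_ready_queues` is a
// hypothetical placeholder). Must be called with preemption disabled:
//
//   ready_schedule_lock();
//   scan_ready_queues();       // safe: queues cannot be grown/shrunk here
//   ready_schedule_unlock();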

#ifdef __CFA_WITH_VERIFY__
	static inline bool ready_schedule_islocked(void) {
		/* paranoid */ verify( ! __preemption_enabled() );
		/*paranoid*/ verify( kernelTLS().this_proc_id );
		__processor_id_t * proc = kernelTLS().this_proc_id;
		return __scheduler_lock->data[proc->id].owned;
	}

	static inline bool ready_mutate_islocked() {
		return __scheduler_lock->lock;
	}
#endif

//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
uint_fast32_t ready_mutate_lock( void );

void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );

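// Illustrative writer-side usage (a sketch): the value returned by the lock
// must be passed back to the matching unlock.
//
//   uint_fast32_t last_size = ready_mutate_lock();
//   ready_queue_grow( cltr );          // exclusive access to the queues
//   ready_mutate_unlock( last_size );
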
//-----------------------------------------------------------------------
// Lock-free registering/unregistering of processors
// Register a processor to a given cluster and get its unique id in return
// For convenience, also acquires the lock
static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
	register_proc_id( proc );
	return ready_mutate_lock();
}

// Unregister a processor from a given cluster using its id, getting back the original pointer
// assumes the lock is acquired
static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
	ready_mutate_unlock( last_s );
	unregister_proc_id( proc );
}
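
// Illustrative pairing (a sketch; `proc_id` is a hypothetical variable),
// following the comments above: registration also takes the writer lock,
// while unregistration expects it to already be held.
//
//   // registering: acquire, mutate the ready queues, then release
//   uint_fast32_t last = ready_mutate_register( &proc_id );
//   /* ... adjust queues for the new processor ... */
//   ready_mutate_unlock( last );
//
//   // unregistering: the lock is assumed held on entry
//   uint_fast32_t last2 = ready_mutate_lock();
//   /* ... adjust queues for the departing processor ... */
//   ready_mutate_unregister( &proc_id, last2 );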

//-----------------------------------------------------------------------
// Cluster idle lock/unlock
static inline void lock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	// Start by locking the global RWlock so that we know no-one is
	// adding/removing processors while we mess with the idle lock
	ready_schedule_lock();

	// Simple counting lock, acquired by incrementing the counter
	// to an odd number
	for() {
		uint64_t l = this.lock;
		if(
			(0 == (l % 2))
			&& __atomic_compare_exchange_n(&this.lock, &l, l + 1, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
		) return;
		Pause();
	}

	/* paranoid */ verify( ! __preemption_enabled() );
}

static inline void unlock(__cluster_proc_list & this) {
	/* paranoid */ verify( ! __preemption_enabled() );

	/* paranoid */ verify( 1 == (this.lock % 2) );
	// Simple counting lock, released by incrementing to an even number
	__atomic_fetch_add( &this.lock, 1, __ATOMIC_SEQ_CST );

	// Release the global lock, which we acquired when locking
	ready_schedule_unlock();

	/* paranoid */ verify( ! __preemption_enabled() );
}
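
// Illustrative invariant (a sketch, not from this header): even values of
// `lock` mean unlocked, odd values mean locked, so a seqlock-style re-read
// could detect concurrent mutation without taking the lock:
//
//   uint64_t before = __atomic_load_n( &this.lock, __ATOMIC_SEQ_CST );
//   if( 0 == (before % 2) ) {
//       /* ... read the processor list ... */
//       bool consistent = (before == __atomic_load_n( &this.lock, __ATOMIC_SEQ_CST ));
//   }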

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd);

//-----------------------------------------------------------------------
// pop thread from the ready queue of a cluster
// returns 0p if empty
// may return 0p spuriously
__attribute__((hot)) struct $thread * pop_fast(struct cluster * cltr);

//-----------------------------------------------------------------------
// pop thread from the ready queue of a cluster
// returns 0p if empty
// guaranteed to find any threads added before this call
__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr);

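// Illustrative usage (a sketch): try the fast path first, falling back to the
// slow path that is guaranteed to observe earlier pushes.
//
//   struct $thread * thrd = pop_fast( cltr );   // may miss spuriously
//   if( 0p == thrd ) thrd = pop_slow( cltr );   // exhaustive search
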
//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //