source: libcfa/src/concurrency/kernel_private.hfa@ b388ee81

Last change on this file was b388ee81, checked in by Thierry Delisle <tdelisle@…>, 6 years ago

Changed ready RW-Lock to be a single global lock instead of per cluster.
This was needed because otherwise, processors outside the cluster could not schedule threads.

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Sat Nov 30 19:25:02 2019
// Update Count     : 8
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"


//-----------------------------------------------------------------------------
// Scheduler

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts_noPoll();
	void enable_interrupts( __cfaabi_dbg_ctx_param );
}

void __schedule_thread( $thread * ) __attribute__((nonnull (1)));

// Block the current thread and release/wake-up the following resources
void __leave_thread() __attribute__((noreturn));

//-----------------------------------------------------------------------------
// Processor
void main(processorCtx_t *);

void * __create_pthread( pthread_t *, void * (*)(void *), void * );



struct event_kernel_t {
	alarm_list_t alarms;
	__spinlock_t lock;
};

extern event_kernel_t * event_kernel;

struct __cfa_kernel_preemption_state_t {
	bool enabled;
	bool in_progress;
	unsigned short disable_count;
};

extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" )));

extern cluster * mainCluster;

//-----------------------------------------------------------------------------
// Threads
extern "C" {
	void __cfactx_invoke_thread(void (*main)(void *), void * this);
}

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
	extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
)

// KERNEL ONLY: unpark without disabling interrupts
void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 );

//-----------------------------------------------------------------------------
// I/O
void __kernel_io_startup     ( cluster &, unsigned, bool );
void __kernel_io_finish_start( cluster & );
void __kernel_io_prepare_stop( cluster & );
void __kernel_io_shutdown    ( cluster &, bool );

//-----------------------------------------------------------------------------
// Utils
#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]
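
// Illustrative sketch (not part of this header): KERNEL_STORAGE reserves raw,
// suitably aligned static storage so an object can later be constructed in place,
// e.g. a hypothetical startup sequence might do:
//
//   KERNEL_STORAGE(cluster, mainCluster);                 // declares: static char storage_mainCluster[sizeof(cluster)]
//   cluster * the_cluster = (cluster *)&storage_mainCluster;
//   (*the_cluster){};                                     // placement-construct once the runtime is ready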

static inline uint32_t __tls_rand() {
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
	return kernelTLS.rand_seed;
}
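
// Illustrative sketch (not part of this header): the per-processor xorshift state in
// kernelTLS makes this generator cheap and lock-free, e.g. for picking a random
// ready-queue lane (`lanes_count` is a hypothetical name):
//
//   unsigned lane = __tls_rand() % lanes_count;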


void doregister( struct cluster & cltr );
void unregister( struct cluster & cltr );

void doregister( struct cluster * cltr, struct $thread & thrd );
void unregister( struct cluster * cltr, struct $thread & thrd );

void doregister( struct cluster * cltr, struct processor * proc );
void unregister( struct cluster * cltr, struct processor * proc );

//=======================================================================
// Cluster lock API
//=======================================================================
// Cells used by the reader-writer lock
// while not generic, it only relies on an opaque pointer
struct __attribute__((aligned(64))) __processor_id {
	processor * volatile handle;
	volatile bool lock;
};

// Lock-free registering/unregistering of processors
// Register a processor with the scheduler's reader-writer lock and get its unique id in return
unsigned doregister( struct processor * proc );

// Unregister a processor using its previously assigned id
void unregister( struct processor * proc );
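
// Illustrative sketch (not part of this header): a processor registers itself once
// with the global reader-writer lock, keeps the returned slot in its `id` field so
// the reader-side functions below can find its cell, and unregisters on shutdown:
//
//   proc->id = doregister( proc );   // processor startup
//   ...                              // schedule threads via ready_schedule_lock/unlock
//   unregister( proc );              // processor shutdown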

//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
// i.e., processors can be added at any point during or between the entry/exit

//-----------------------------------------------------------------------
// Simple spinlock underlying the RWLock
// Blocking acquire
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			asm volatile("pause");
	}
	/* paranoid */ verify(*ll);
}

// Non-blocking acquire
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

// Release
static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}
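
// Illustrative sketch (not part of this header): these helpers form a
// test-and-test-and-set spinlock over a plain volatile bool:
//
//   static volatile bool flag = false;
//   if( !__atomic_try_acquire( &flag ) )  // fast path
//      __atomic_acquire( &flag );         // spin until free
//   ... critical section ...
//   __atomic_unlock( &flag );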

//-----------------------------------------------------------------------
// Reader-Writer lock protecting the ready-queues
// while this lock is mostly generic, some aspects
// have been hard-coded for the ready-queue for
// simplicity and performance
struct __scheduler_RWLock_t {
	// total cachelines allocated
	unsigned int max;

	// cachelines currently in use
	volatile unsigned int alloc;

	// cachelines ready to iterate over
	// (!= to alloc when a thread is in the second half of doregister)
	volatile unsigned int ready;

	// writer lock
	volatile bool lock;

	// data pointer
	__processor_id * data;
};

void  ?{}(__scheduler_RWLock_t & this);
void ^?{}(__scheduler_RWLock_t & this);

extern __scheduler_RWLock_t * __scheduler_lock;

//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock( struct processor * proc) with(*__scheduler_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		asm volatile("pause");

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);
}

static inline void ready_schedule_unlock( struct processor * proc) with(*__scheduler_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	__atomic_unlock(&data[iproc].lock);
}
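
// Illustrative sketch (not part of this header): scheduling work takes the reader
// side so no writer can resize the ready-queues mid-operation (`proc` and `thrd`
// are hypothetical, and `proc->cltr` assumes the processor knows its cluster):
//
//   ready_schedule_lock( proc );
//   push( proc->cltr, thrd );        // or pop( proc->cltr ) when looking for work
//   ready_schedule_unlock( proc );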
206
207//-----------------------------------------------------------------------
208// Writer side : acquire when changing the ready queue, e.g. adding more
209// queues or removing them.
[b388ee81]210uint_fast32_t ready_mutate_lock( void );
[7768b8d]211
[b388ee81]212void ready_mutate_unlock( uint_fast32_t /* value returned by lock */ );
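
// Illustrative sketch (not part of this header): structural changes to the
// ready-queues take the writer side and hand the value returned by the lock back
// when releasing:
//
//   uint_fast32_t last = ready_mutate_lock();
//   ready_queue_grow( cltr );        // declared below; runs with all readers excluded
//   ready_mutate_unlock( last );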

//=======================================================================
// Ready-Queue API
//-----------------------------------------------------------------------
// push thread onto a ready queue for a cluster
// returns true if the list was previously empty, false otherwise
__attribute__((hot)) bool push(struct cluster * cltr, struct $thread * thrd);

//-----------------------------------------------------------------------
// pop thread from the ready queue of a cluster
// returns 0p if empty
__attribute__((hot)) struct $thread * pop(struct cluster * cltr);
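
// Illustrative sketch (not part of this header): the boolean returned by push can
// signal an empty-to-non-empty transition, e.g. to wake an idle processor
// (`wake` and `idle_proc` are hypothetical):
//
//   if( push( cltr, thrd ) ) {
//      wake( idle_proc );            // list was previously empty
//   }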

//-----------------------------------------------------------------------
// Increase the width of the ready queue (number of lanes) by 4
void ready_queue_grow  (struct cluster * cltr);

//-----------------------------------------------------------------------
// Decrease the width of the ready queue (number of lanes) by 4
void ready_queue_shrink(struct cluster * cltr);

//-----------------------------------------------------------------------
// Statistics call made at the end of each thread to register its statistics
#if !defined(__CFA_NO_STATISTICS__)
void stats_tls_tally(struct cluster * cltr);
#else
static inline void stats_tls_tally(struct cluster * cltr) {}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //