source: libcfa/src/concurrency/kernel_private.hfa@ d4f1521

Last change on this file since d4f1521 was 7768b8d, checked in by Thierry Delisle <tdelisle@…>, 6 years ago

First step at adding the new ready queue to Cforall

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// kernel_private.hfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Feb 13 12:27:26 2017
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Mar 29 14:06:40 2018
// Update Count     : 3
//

#pragma once

#include "kernel.hfa"
#include "thread.hfa"

#include "alarm.hfa"

//-----------------------------------------------------------------------------
// Scheduler

extern "C" {
	void disable_interrupts() OPTIONAL_THREAD;
	void enable_interrupts_noPoll();                   // re-enable without polling for pending preemption
	void enable_interrupts( __cfaabi_dbg_ctx_param );
}

void ScheduleThread( thread_desc * );

// Wake a blocked (Inactive) thread by scheduling it back onto the ready queue.
static inline void WakeThread( thread_desc * thrd ) {
	if( !thrd ) return;

	verify(thrd->state == Inactive);

	disable_interrupts();
	ScheduleThread( thrd );
	enable_interrupts( __cfaabi_dbg_ctx );
}

// Obtain the next ready thread from a cluster's ready queue.
thread_desc * nextThread(cluster * this);

// Block the current thread, atomically releasing the given lock(s) and/or waking the given thread(s)
void BlockInternal(void);
void BlockInternal(__spinlock_t * lock);
void BlockInternal(thread_desc * thrd);
void BlockInternal(__spinlock_t * lock, thread_desc * thrd);
void BlockInternal(__spinlock_t * locks [], unsigned short count);
void BlockInternal(__spinlock_t * locks [], unsigned short count, thread_desc * thrds [], unsigned short thrd_count);
void BlockInternal(__finish_callback_fptr_t callback);
void LeaveThread(__spinlock_t * lock, thread_desc * thrd);

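// Usage sketch (illustrative, not part of the original header): a blocking primitive
// typically holds a spinlock protecting its waiting list while enqueuing the current
// thread, then calls the lock-taking overload so the kernel releases the lock only
// once the thread has stopped running, avoiding a lost wake-up. wait_lock is hypothetical.
//
//   // acquire wait_lock, enqueue the current thread on the waiting list, then:
//   BlockInternal( &wait_lock );   // atomically release wait_lock and block until woken
//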
//-----------------------------------------------------------------------------
// Processor
// Coroutine main of a processor's runtime context (the per-processor scheduling loop).
void main(processorCtx_t *);

// Wake an idle processor by posting its idle semaphore (fast path, no interrupt toggling).
static inline void wake_fast(processor * this) {
	__cfaabi_dbg_print_safe("Kernel : Waking up processor %p\n", this);
	post( this->idleLock );
}

// Wake an idle processor, disabling interrupts around the post.
static inline void wake(processor * this) {
	disable_interrupts();
	wake_fast(this);
	enable_interrupts( __cfaabi_dbg_ctx );
}

// Kernel-wide alarm state (used by the preemption/timer subsystem), protected by its spinlock.
struct event_kernel_t {
	alarm_list_t alarms;
	__spinlock_t lock;
};

extern event_kernel_t * event_kernel;

struct __cfa_kernel_preemption_state_t {
	bool enabled;                 // is preemption currently enabled for this kernel thread?
	bool in_progress;             // is a preemption currently being handled?
	unsigned short disable_count; // nesting depth of disable_interrupts() calls
};

extern volatile thread_local __cfa_kernel_preemption_state_t preemption_state __attribute__ ((tls_model ( "initial-exec" )));

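// Sketch of the nesting discipline implied by disable_count (inferred, not part of the
// original header): disable/enable calls may nest, and preemption is only re-enabled
// when the count returns to zero.
//
//   disable_interrupts();                    // disable_count 0 -> 1, enabled = false
//   disable_interrupts();                    // disable_count 1 -> 2
//   enable_interrupts( __cfaabi_dbg_ctx );   // disable_count 2 -> 1, still disabled
//   enable_interrupts( __cfaabi_dbg_ctx );   // disable_count 1 -> 0, enabled = true
//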
//-----------------------------------------------------------------------------
// Threads
extern "C" {
	forall(dtype T | is_thread(T))
	void CtxInvokeThread(T * this);
}

extern void ThreadCtxSwitch(coroutine_desc * src, coroutine_desc * dst);

__cfaabi_dbg_debug_do(
	extern void __cfaabi_dbg_thread_register  ( thread_desc * thrd );
	extern void __cfaabi_dbg_thread_unregister( thread_desc * thrd );
)

//-----------------------------------------------------------------------------
// Utils

// Reserve static, suitably aligned but unconstructed storage for an object of type T, named storage_X.
#define KERNEL_STORAGE(T,X) __attribute((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]

// Cheap thread-local pseudo-random number generator (xorshift-style), state kept in kernelTLS.
static inline uint32_t tls_rand() {
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 6;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed >> 21;
	kernelTLS.rand_seed ^= kernelTLS.rand_seed << 7;
	return kernelTLS.rand_seed;
}
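
// Usage sketches (illustrative, not part of the original header; mainProcessor and
// count are hypothetical names):
//
//   KERNEL_STORAGE(processor, mainProcessor);   // declares: static char storage_mainProcessor[sizeof(processor)]
//   // ... the object is later constructed in place inside that storage ...
//
//   unsigned idx = tls_rand() % count;          // cheap, lock-free random selection
//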

// Register/unregister a cluster with the runtime's global list of clusters.
void doregister( struct cluster & cltr );
void unregister( struct cluster & cltr );

// Register/unregister a thread with its cluster.
void doregister( struct cluster * cltr, struct thread_desc & thrd );
void unregister( struct cluster * cltr, struct thread_desc & thrd );

//=======================================================================
// Cluster lock API
//=======================================================================
// One slot per registered processor, cache-line aligned to avoid false sharing.
struct __attribute__((aligned(64))) __processor_id {
	processor * volatile handle;  // processor currently registered in this slot
	volatile bool lock;           // per-processor reader lock bit
};

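// Design sketch (inferred from the code below, not part of the original header): the
// cluster's ready_lock is a specialized reader-writer lock where each registered
// processor owns one __processor_id slot. A reader only touches its own slot's lock
// bit, so the uncontended reader path is a single atomic exchange; a writer takes the
// global flag and then every per-processor bit.
//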
// Lock-free registering/unregistering of processors with a cluster.
// Register a processor with a given cluster and get its unique id in return.
unsigned doregister( struct cluster * cltr, struct processor * proc );

// Unregister a processor from a given cluster using its previously assigned id.
void unregister( struct cluster * cltr, struct processor * proc );

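// Lifecycle sketch (illustrative, not part of the original header): a processor
// registers with its cluster once, keeps the returned id for later reader-lock
// operations, and unregisters on shutdown.
//
//   proc->id = doregister( cltr, proc );   // lock-free acquisition of a __processor_id slot
//   // ... schedule using ready_schedule_lock( *cltr, proc ) / ready_schedule_unlock( *cltr, proc ) ...
//   unregister( cltr, proc );              // release the slot
//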
//=======================================================================
// Reader-writer lock implementation
// Concurrent with doregister/unregister,
//    i.e., processors can be added at any point during or between the entry/exit

// Test-and-test-and-set acquire of a single-bool spin lock.
static inline void __atomic_acquire(volatile bool * ll) {
	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
			asm volatile("pause");
	}
	/* paranoid */ verify(*ll);
}

// Single attempt at acquiring the lock.
// Note : returns the previous value of the flag, i.e. false means the lock was acquired.
static inline bool __atomic_try_acquire(volatile bool * ll) {
	return __atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
}

static inline void __atomic_unlock(volatile bool * ll) {
	/* paranoid */ verify(*ll);
	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
}

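// Usage sketch (illustrative): these helpers form a minimal spin lock over a single
// bool, used below for the per-processor reader slots.
//
//   volatile bool flag = false;
//   __atomic_acquire( &flag );   // spins until ownership is obtained
//   // ... critical section ...
//   __atomic_unlock( &flag );    // release store
//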
//-----------------------------------------------------------------------
// Reader side : acquire when using the ready queue to schedule but not
//  creating/destroying queues
static inline void ready_schedule_lock( struct cluster & cltr, struct processor * proc) with(cltr.ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);

	// Step 1 : make sure no writer is in the middle of the critical section
	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
		asm volatile("pause");

	// Fence needed because we don't want to start trying to acquire the lock
	// before we read a false.
	// Not needed on x86
	// std::atomic_thread_fence(std::memory_order_seq_cst);

	// Step 2 : acquire our local lock
	__atomic_acquire( &data[iproc].lock );
	/*paranoid*/ verify(data[iproc].lock);
}

static inline void ready_schedule_unlock( struct cluster & cltr, struct processor * proc) with(cltr.ready_lock) {
	unsigned iproc = proc->id;
	/*paranoid*/ verify(data[iproc].handle == proc);
	/*paranoid*/ verify(iproc < ready);
	/*paranoid*/ verify(data[iproc].lock);
	__atomic_store_n(&data[iproc].lock, false, __ATOMIC_RELEASE);
}

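// Reader-side usage sketch (illustrative): scheduling code brackets its ready-queue
// accesses with the per-processor reader lock.
//
//   ready_schedule_lock( cltr, proc );
//   // ... push to / pop from the cluster's ready queue ...
//   ready_schedule_unlock( cltr, proc );
//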
//-----------------------------------------------------------------------
// Writer side : acquire when changing the ready queue, e.g. adding more
//  queues or removing them.
// The value returned by ready_mutate_lock must be passed back to ready_mutate_unlock.
uint_fast32_t ready_mutate_lock( struct cluster & cltr );

void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t );

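// Writer-side usage sketch (illustrative): structural changes to the ready queues take
// the writer lock and hand the returned token back on unlock.
//
//   uint_fast32_t last = ready_mutate_lock( cltr );
//   // ... add or remove ready queues ...
//   ready_mutate_unlock( cltr, last );
//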
bool push(__intrusive_ready_queue_t & this, thread_desc * node);
[thread_desc *, bool] pop(__intrusive_ready_queue_t & this);

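// Usage sketch (illustrative): Cforall multiple return values can be unpacked directly;
// the meaning of the returned bool is defined by the ready-queue implementation and is
// left abstract here. queue is a hypothetical __intrusive_ready_queue_t.
//
//   thread_desc * thrd; bool flag;
//   [thrd, flag] = pop( queue );
//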
// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //