source: libcfa/src/concurrency/invoke.h @ 63be3387

Last change on this file since 63be3387 was 639e4fc, checked in by Thierry Delisle <tdelisle@…>, 3 years ago

Changed cluster link to use explicit type to avoid anonymous names in symbols gdb cares about

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Sun Jan 9 19:06:45 2022
// Update Count     : 48
//

#include "bits/containers.hfa"
#include "bits/defs.hfa"
#include "bits/locks.hfa"
#include "kernel/fwd.hfa"

#ifdef __cforall
#include "containers/list.hfa"
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

	enum { DEFAULT_STACK_SIZE = 65000 };

	struct __cfaehm_try_resume_node;
	struct __cfaehm_base_exception_t;

	// Per-stack exception-handling state: the head of the chain of resumption
	// handlers active on this stack, and the exception currently being handled
	// (as the field names suggest).
	struct exception_context_t {
		struct __cfaehm_try_resume_node * top_resume;
		struct __cfaehm_base_exception_t * current_exception;
	};

	struct __stack_context_t {
		void * SP;
		void * FP;
	};

	// low addresses  :           +----------------------+ <- start of allocation
	//                            |  optional guard page |
	//                            +----------------------+ <- __stack_t.limit
	//                            |                      |
	//                            |       /\ /\ /\       |
	//                            |       || || ||       |
	//                            |                      |
	//                            |    program stack     |
	//                            |                      |
	// __stack_info_t.storage ->  +----------------------+ <- __stack_t.base
	//                            |       __stack_t      |
	// high addresses :           +----------------------+ <- end of allocation

	struct __stack_t {
		// stack grows towards stack limit
		void * limit;

		// base of stack
		void * base;

		// Information for exception handling.
		struct exception_context_t exception_context;
	};

	struct __stack_info_t {
		// pointer to stack
		struct __stack_t * storage;
	};
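	// A minimal sketch of how the layout drawn above maps onto one allocation
	// (illustrative only; `alloc`, `size`, and `guard` are hypothetical names,
	// and the real allocation code lives elsewhere in the runtime):
	//
	//   void * alloc = malloc( size );                          // low address
	//   struct __stack_t * meta =
	//       (struct __stack_t *)((char *)alloc + size) - 1;     // __stack_t at the top
	//   meta->base  = meta;                                     // stack grows down from here
	//   meta->limit = (char *)alloc + guard;                    // just above any guard page
	//   // __stack_info_t.storage then points at `meta`, possibly with a tag
	//   // in its low bit (see __get_stack below).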

	enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting };

	struct coroutine$ {
		// context that is switched during a __cfactx_switch
		struct __stack_context_t context;

		// stack information of the coroutine
		struct __stack_info_t stack;

		// textual name for coroutine/task
		const char * name;

		// current execution status for coroutine
		enum __Coroutine_State state;

		// first coroutine to resume this one
		struct coroutine$ * starter;

		// last coroutine to resume this one
		struct coroutine$ * last;

		// if non-null, the stack must be unwound with this exception
		struct _Unwind_Exception * cancellation;
	};

	// Wrapper for gdb
	struct cfathread_coroutine_t { struct coroutine$ debug; };
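	// The `debug` member gives gdb a named struct type to work with; a
	// plausible debugger use (hypothetical address):
	//
	//   (gdb) print *(struct cfathread_coroutine_t *) 0x7ffff7a00000
	//
	// The same wrapper pattern recurs below for monitor$ and thread$.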

	static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) {
		return (struct __stack_t *)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
	}
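	// Note: (uintptr_t)-2 is all ones except bit 0, so the expression above
	// strips a tag kept in the low bit of stack.storage (presumably marking
	// user-supplied versus runtime-allocated storage). For example:
	//
	//   0x7ffff7a01001 & (uintptr_t)-2 == 0x7ffff7a01000   // tag dropped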

	// struct defining which calls the monitor is accepting
	struct __waitfor_mask_t {
		// the index of the accepted function, -1 if none
		short * accepted;

		// list of acceptable functions, null if any
		__cfa_anonymous_object( __small_array_t(struct __acceptable_t) );
	};
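	// An interpretation of the fields above: while a thread holding the
	// monitor waits in a waitfor statement over, say, functions f and g, the
	// mask lists { f, g } as acceptable; when a caller is accepted, *accepted
	// receives the index of the matching clause, and -1 means none matched.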

	struct monitor$ {
		// spinlock to protect internal data
		struct __spinlock_t lock;

		// current owner of the monitor
		struct thread$ * owner;

		// queue of threads that are blocked waiting for the monitor
		__queue_t(struct thread$) entry_queue;

		// stack of conditions to run next once we exit the monitor
		__stack_t(struct __condition_criterion_t) signal_stack;

		// monitor routines can be called recursively, so we need to keep track of the recursion depth
		unsigned int recursion;

		// mask used to know if some thread is waiting for something while holding the monitor
		struct __waitfor_mask_t mask;

		// node used to signal the dtor in a waitfor dtor
		struct __condition_node_t * dtor_node;
	};

	// Wrapper for gdb
	struct cfathread_monitor_t { struct monitor$ debug; };

	struct __monitor_group_t {
		// currently held monitors
		__cfa_anonymous_object( __small_array_t(monitor$*) );

		// last function that acquired monitors
		fptr_t func;
	};

	// Linked-list fields
	// intrusive link field for threads in the ready-queue
	struct __thread_desc_link {
		struct thread$ * next;
		volatile unsigned long long ts;      // ready-queue timestamp
	};

	// Linked-list fields
	// intrusive link field for threads in the user_link/cltr_link
	struct __thread_user_link {
	#ifdef __cforall
		inline dlink(thread$);
	#else
		struct thread$ * next; struct thread$ * back;
	#endif
	};
	_Static_assert(sizeof(struct __thread_user_link) == 2 * sizeof(struct thread$ *), "__thread_user_link should be consistent in C and Cforall");

	struct thread$ {
		// Core threading fields
		// context that is switched during a __cfactx_switch
		struct __stack_context_t context;

		// Linked-list fields
		// intrusive link field for threads
		struct __thread_desc_link rdy_link;

		// synchronization ticket; possible values are:
		//    - TICKET_BLOCKED (-1) thread is blocked
		//    - TICKET_RUNNING ( 0) thread is running
		//    - TICKET_UNBLOCK ( 1) thread should ignore next block
		volatile int ticket;

		// current execution status for coroutine
		enum __Coroutine_State state:8;
		enum __Preemption_Reason preempted:8;

		bool corctx_flag;

		// SKULLDUGGERY: errno is not saved in the thread data structure because
		// returnToKernel appears to be the only function that requires saving
		// and restoring it

		// pointer to the cluster on which the thread is running
		struct cluster * curr_cluster;

		// preferred ready-queue or CPU
		unsigned preferred;

		// coroutine body used to store context
		struct coroutine$ self_cor;

		// current active context
		struct coroutine$ * curr_cor;

		// monitor body used for mutual exclusion
		struct monitor$ self_mon;

		// pointer to monitor with sufficient lifetime for current monitors
		struct monitor$ * self_mon_p;

		// monitors currently held by this thread
		struct __monitor_group_t monitors;

		// intrusive link fields, used for locks, monitors, and any user-defined data structure
		// default link fields for dlist
		struct __thread_user_link user_link;

		// secondary intrusive link fields, used for the global cluster list
		// default link fields for dlist
		struct __thread_user_link cltr_link;

		// used to store state between clh lock/unlock
		volatile bool * clh_prev;

		// used to point to this thread's current clh node
		volatile bool * clh_node;

		// last processor this thread ran on
		struct processor * last_proc;

		uint32_t random_state;               // fast random numbers

		#if defined( __CFA_WITH_VERIFY__ )
			struct processor * volatile executing;
			void * canary;
		#endif
	};

	// Wrapper for gdb
	struct cfathread_thread_t { struct thread$ debug; };
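	// A minimal sketch of how a ticket with the three values documented above
	// can mediate park/unpark races (illustrative only; the real protocol
	// lives in the kernel sources, and `thrd` is a hypothetical parameter):
	//
	//   void park_sketch( struct thread$ * thrd ) {
	//       // RUNNING(0) -> BLOCKED(-1), or UNBLOCK(1) -> RUNNING(0)
	//       int old = __atomic_fetch_sub( &thrd->ticket, 1, __ATOMIC_SEQ_CST );
	//       if ( old == 1 ) return;    // an unpark already happened: ignore this block
	//       /* otherwise block until unpark() sees TICKET_BLOCKED and reschedules us */
	//   }
	//
	//   void unpark_sketch( struct thread$ * thrd ) {
	//       // BLOCKED(-1) -> RUNNING(0), or RUNNING(0) -> UNBLOCK(1)
	//       int old = __atomic_fetch_add( &thrd->ticket, 1, __ATOMIC_SEQ_CST );
	//       if ( old == -1 ) { /* thread was blocked: hand it back to the scheduler */ }
	//   }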

	#ifdef __CFA_DEBUG__
		void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]);
	#else
		#define __cfaabi_dbg_record_thrd(x, y, z)
	#endif

	#ifdef __cforall
	extern "Cforall" {
		// expose the intrusive next pointer used by __queue_t
		static inline thread$ * volatile & ?`next ( thread$ * this ) {
			return this->user_link.next;
		}

		static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
			return this.user_link.next;
		}

		// view a thread$ as its default dlink, so it can live in a dlist
		static inline tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) {
			dlink(thread$) & b = this.user_link;
			tytagref( dlink(thread$), dlink(thread$) ) result = { b };
			return result;
		}

		// same, but through the secondary cltr_link (for the global cluster list)
		static inline tytagref(struct __thread_user_link, dlink(thread$)) ?`inner( struct thread$ & this ) {
			struct __thread_user_link & ib = this.cltr_link;
			dlink(thread$) & b = ib`inner;
			tytagref(struct __thread_user_link, dlink(thread$)) result = { b };
			return result;
		}

		P9_EMBEDDED(struct __thread_user_link, dlink(thread$))
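		// A usage sketch of the embedding declared above (illustrative only:
		// the exact operation names come from containers/list.hfa, so
		// `insert_last` and this calling form are assumptions here):
		//
		//   dlist(thread$) ready;        // list threaded through user_link
		//   insert_last( ready, *t );    // t : thread$ *
		//
		// P9_EMBEDDED declares that __thread_user_link embeds a dlink(thread$),
		// which is what lets cltr_link-based lists type-check as well.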

		static inline void ?{}(__monitor_group_t & this) {
			(this.data){0p};
			(this.size){0};
			(this.func){NULL};
		}

		static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) {
			(this.data){data};
			(this.size){size};
			(this.func){func};
		}

		static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
			if( (lhs.data != 0) != (rhs.data != 0) ) return false;
			if( lhs.size != rhs.size ) return false;
			if( lhs.func != rhs.func ) return false;

			// check that all the monitors match
			for( int i = 0; i < lhs.size; i++ ) {
				// if any monitor differs, the groups differ
				if( lhs[i] != rhs[i] ) return false;
			}

			return true;
		}

		static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) {
			lhs.data = rhs.data;
			lhs.size = rhs.size;
			lhs.func = rhs.func;
		}
	}
	#endif

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

	struct machine_context_t {
		void * SP;
		void * FP;
		void * PC;
	};

	// assembler routines that perform the context switch
	extern void __cfactx_invoke_stub( void );
	extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
	// void CtxStore ( void * this ) asm ("CtxStore");
	// void CtxRet   ( void * dst  ) asm ("CtxRet");
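	// A usage sketch (illustrative only; `from` and `to` are hypothetical
	// locals): the call saves SP/FP into `from`, restores `to`, and returns
	// only when some later switch restores `from`:
	//
	//   struct coroutine$ * from = ..., * to = ...;
	//   __cfactx_switch( &from->context, &to->context );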

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)
#ifdef __cforall
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //