source: libcfa/src/concurrency/invoke.h@ 708ae38

ADT ast-experimental enum pthread-emulation qualifiedEnum
Last change on this file since 708ae38 was 2210cfc, checked in by Peter A. Buhr <pabuhr@…>, 4 years ago

second attempt at specialized PRNG

  • Property mode set to 100644
File size: 8.4 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// invoke.h --
8//
9// Author : Thierry Delisle
10// Created On : Tue Jan 17 12:27:26 2016
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Sun Jan 9 19:06:45 2022
13// Update Count : 48
14//
15
16#include "bits/containers.hfa"
17#include "bits/defs.hfa"
18#include "bits/locks.hfa"
19#include "kernel/fwd.hfa"
20
21#ifdef __cforall
22#include "containers/list.hfa"
23extern "C" {
24#endif
25
26#if ! defined(__CFA_INVOKE_PRIVATE__)
27#ifndef _INVOKE_H_
28#define _INVOKE_H_
29
	// Default coroutine/thread stack size in bytes when the user requests none.
	enum { DEFAULT_STACK_SIZE = 65000 };
31
	// Forward declarations from the exception-handling runtime (cfaehm).
	struct __cfaehm_try_resume_node;
	struct __cfaehm_base_exception_t;

	// Per-stack exception-handling state; one instance is embedded in each
	// __stack_t so handlers and in-flight exceptions are tracked per stack.
	struct exception_context_t {
		// top of the stack of installed try-resume handlers
		struct __cfaehm_try_resume_node * top_resume;

		// exception currently being propagated on this stack, if any
		struct __cfaehm_base_exception_t * current_exception;
	};
38
	// Minimal machine context saved/restored across a __cfactx_switch.
	struct __stack_context_t {
		void * SP;	// saved stack pointer
		void * FP;	// saved frame pointer
	};
43
	// Layout of a stack allocation: the __stack_t bookkeeping record sits at
	// the high end of the allocation and the program stack grows down from it
	// towards an optional guard page.
	//
	// low addresses  :          +----------------------+ <- start of allocation
	//                           |  optional guard page |
	//                           +----------------------+ <- __stack_t.limit
	//                           |                      |
	//                           |       /\ /\ /\       |
	//                           |       || || ||       |
	//                           |                      |
	//                           |    program  stack    |
	//                           |                      |
	// __stack_info_t.storage -> +----------------------+ <- __stack_t.base
	//                           |      __stack_t       |
	// high addresses :          +----------------------+ <- end of allocation

	struct __stack_t {
		// lowest usable address; the stack grows down towards this limit
		void * limit;

		// base (highest usable address) of the stack
		void * base;

		// Information for exception handling on this stack.
		struct exception_context_t exception_context;
	};
67
	// Handle to a stack's bookkeeping record.
	// NOTE(review): __get_stack masks off bit 0 of 'storage', so the low bit
	// appears to carry a flag (presumably user-supplied vs. runtime-allocated
	// stack) — confirm against the kernel/coroutine allocation code.
	struct __stack_info_t {
		// pointer to stack bookkeeping record (low bit may be a flag; see above)
		struct __stack_t * storage;
	};
72
	// Lifecycle states of a coroutine (also used for the coroutine part of a thread).
	enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting };
74
	// Descriptor for a coroutine: context-switch state, its stack, and the
	// resume/suspend linkage of the coroutine protocol.
	struct coroutine$ {
		// context that is switched during a __cfactx_switch
		struct __stack_context_t context;

		// stack information of the coroutine
		struct __stack_info_t stack;

		// textual name for coroutine/task
		const char * name;

		// current execution status for coroutine
		enum __Coroutine_State state;

		// first coroutine to resume this one
		struct coroutine$ * starter;

		// last coroutine to resume this one
		struct coroutine$ * last;

		// If non-null, the stack must be unwound with this exception
		struct _Unwind_Exception * cancellation;

	};
	// Wrapper so gdb can display the coroutine descriptor
	struct cfathread_coroutine_t { struct coroutine$ debug; };
100
101 static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) {
102 return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
103 }
104
	// Records which calls a monitor is currently accepting (waitfor support).
	struct __waitfor_mask_t {
		// the index of the accepted function, -1 if none
		short * accepted;

		// list of acceptable functions, null if any is acceptable
		__cfa_anonymous_object( __small_array_t(struct __acceptable_t) );
	};
113
	// Descriptor for a monitor: lock-protected mutual exclusion plus
	// condition/waitfor signalling for threads.
	struct monitor$ {
		// spinlock to protect internal data
		struct __spinlock_t lock;

		// current owner of the monitor
		struct thread$ * owner;

		// queue of threads that are blocked waiting for the monitor
		__queue_t(struct thread$) entry_queue;

		// stack of conditions to run next once we exit the monitor
		__stack_t(struct __condition_criterion_t) signal_stack;

		// monitor routines can be called recursively, we need to keep track of that
		unsigned int recursion;

		// mask used to know if some thread is waiting for something while holding the monitor
		struct __waitfor_mask_t mask;

		// node used to signal the dtor in a waitfor dtor
		struct __condition_node_t * dtor_node;
	};
	// Wrapper so gdb can display the monitor descriptor
	struct cfathread_monitor_t { struct monitor$ debug; };
138
	// A set of monitors held together with the function that acquired them.
	struct __monitor_group_t {
		// currently held monitors (anonymous small-array: 'data' + 'size')
		__cfa_anonymous_object( __small_array_t(monitor$*) );

		// last function that acquired the monitors
		fptr_t func;
	};
146
	// Intrusive link field placed in each thread for the runtime's linked lists.
	struct __thread_desc_link {
		struct thread$ * next;
		// timestamp — NOTE(review): presumably the enqueue time used for
		// ready-queue ordering; confirm against the scheduler code.
		volatile unsigned long long ts;
	};
153
	// Descriptor for a user-level thread.  Embeds coroutine and monitor
	// descriptors (self_cor / self_mon) so a thread can be used wherever a
	// coroutine or monitor is expected.
	struct thread$ {
		// Core threading fields
		// context that is switched during a __cfactx_switch
		struct __stack_context_t context;

		// Link lists fields
		// intrusive link field for threads
		struct __thread_desc_link link;

		// current execution status for coroutine
		// Possible values are:
		// - TICKET_BLOCKED (-1) thread is blocked
		// - TICKET_RUNNING ( 0) thread is running
		// - TICKET_UNBLOCK ( 1) thread should ignore next block
		volatile int ticket;
		enum __Coroutine_State state:8;
		enum __Preemption_Reason preempted:8;

		// NOTE(review): coroutine-context flag; exact semantics are not
		// evident from this header — confirm in the kernel code.
		bool corctx_flag;

		//SKULLDUGGERY: errno is not saved in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it

		// pointer to the cluster on which the thread is running
		struct cluster * curr_cluster;

		// preferred ready-queue or CPU
		unsigned preferred;

		// coroutine body used to store context
		struct coroutine$ self_cor;

		// current active context
		struct coroutine$ * curr_cor;

		// monitor body used for mutual exclusion
		struct monitor$ self_mon;

		// pointer to monitor with sufficient lifetime for current monitors
		struct monitor$ * self_mon_p;

		// monitors currently held by this thread
		struct __monitor_group_t monitors;

		// used to put threads on user data structures
		struct {
			struct thread$ * next;
			struct thread$ * back;
		} seqable;

		// used to put threads on dlist data structure
		__cfa_dlink(thread$);

		// intrusive next/prev links for internal doubly-linked structures
		struct {
			struct thread$ * next;
			struct thread$ * prev;
		} node;

		// NOTE(review): presumably the last processor this thread ran on — confirm
		struct processor * last_proc;

		uint32_t random_state; // fast random numbers

		#if defined( __CFA_WITH_VERIFY__ )
		// canary value, checked only in verification builds
		void * canary;
		#endif
	};
	#ifdef __cforall
	P9_EMBEDDED( thread$, dlink(thread$) )
	#endif
	// Wrapper so gdb can display the thread descriptor
	struct cfathread_thread_t { struct thread$ debug; };
224
	// Debug hook recording a thread's park/unpark transitions; expands to
	// nothing in non-debug builds.
	#ifdef __CFA_DEBUG__
	void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]);
	#else
	#define __cfaabi_dbg_record_thrd(x, y, z)
	#endif
230
231 #ifdef __cforall
232 extern "Cforall" {
233
	// Accessor for the intrusive scheduler link: mutable reference to 'link.next'.
	static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
		return this.link.next;
	}
237
	// Returns references to both 'node' links as a Cforall tuple, for use by
	// the generic container code.
	static inline [thread$ *&, thread$ *& ] __get( thread$ & this ) __attribute__((const)) {
		return this.node.[next, prev];
	}
241
	// Postfix `next accessor for the user-level 'seqable' chain.
	static inline thread$ * volatile & ?`next ( thread$ * this ) __attribute__((const)) {
		return this->seqable.next;
	}
245
	// Accessor for the backward link of the user-level 'seqable' chain.
	static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
		return this->seqable.back;
	}
249
	// Accessor for the forward link of the user-level 'seqable' chain.
	static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
		return this->seqable.next;
	}
253
	// A thread counts as listed when its 'seqable.next' link is non-null.
	static inline bool listed( thread$ * this ) {
		return this->seqable.next != 0p;
	}
257
	// Default constructor: an empty monitor group (no monitors, no function).
	static inline void ?{}(__monitor_group_t & this) {
		(this.data){0p};
		(this.size){0};
		(this.func){NULL};
	}
263
	// Field constructor: group of 'size' monitors at 'data', acquired by 'func'.
	static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) {
		(this.data){data};
		(this.size){size};
		(this.func){func};
	}
269
	// Equality: two monitor groups match when both have (or both lack) a
	// monitor array, hold the same count, were acquired by the same function,
	// and list the same monitors in the same order.
	static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
		if( (lhs.data != 0) != (rhs.data != 0) ) return false;
		if( lhs.size != rhs.size ) return false;
		if( lhs.func != rhs.func ) return false;

		// Check that all the monitors match
		for( int i = 0; i < lhs.size; i++ ) {
			// any element mismatch means the groups differ
			if( lhs[i] != rhs[i] ) return false;
		}

		return true;
	}
283
284 static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) {
285 lhs.data = rhs.data;
286 lhs.size = rhs.size;
287 lhs.func = rhs.func;
288 }
289 }
290 #endif
291
292#endif //_INVOKE_H_
293#else //! defined(__CFA_INVOKE_PRIVATE__)
294#ifndef _INVOKE_PRIVATE_H_
295#define _INVOKE_PRIVATE_H_
296
	// Full machine context used by the private invoke machinery; unlike
	// __stack_context_t it also records the program counter.
	struct machine_context_t {
		void *SP;	// stack pointer
		void *FP;	// frame pointer
		void *PC;	// program counter
	};
302
	// Assembly routines that perform the context switch (defined per-architecture).
	extern void __cfactx_invoke_stub( void );
	extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
	// void CtxStore ( void * this ) asm ("CtxStore");
	// void CtxRet ( void * dst ) asm ("CtxRet");
308
309#endif //_INVOKE_PRIVATE_H_
310#endif //! defined(__CFA_INVOKE_PRIVATE__)
311#ifdef __cforall
312}
313#endif
314
315// Local Variables: //
316// mode: c //
317// tab-width: 4 //
318// End: //
Note: See TracBrowser for help on using the repository browser.