source: libcfa/src/concurrency/invoke.h @ 0794365

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Mar 14 13:39:31 2023
// Update Count     : 59
//

// Do not use #pragma once because this file is included twice in some places. It has its own guard system.

#include "bits/containers.hfa"
#include "bits/defs.hfa"
#include "bits/locks.hfa"
#include "bits/random.hfa"
#include "kernel/fwd.hfa"

#ifdef __cforall
#include "containers/list.hfa"
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

    enum { DEFAULT_STACK_SIZE = 65000 };

    struct __cfaehm_try_resume_node;
    struct __cfaehm_base_exception_t;
    struct exception_context_t {
        struct __cfaehm_try_resume_node * top_resume;
        struct __cfaehm_base_exception_t * current_exception;
    };

    struct __stack_context_t {
        void * SP;
        void * FP;
    };

    // low addresses  :           +----------------------+ <- start of allocation
    //                            |  optional guard page |
    //                            +----------------------+ <- __stack_t.limit
    //                            |                      |
    //                            |       /\ /\ /\       |
    //                            |       || || ||       |
    //                            |                      |
    //                            |    program  stack    |
    //                            |                      |
    // __stack_info_t.storage ->  +----------------------+ <- __stack_t.base
    //                            |       __stack_t      |
    // high addresses :           +----------------------+ <- end of allocation

    struct __stack_t {
        // stack grows towards stack limit
        void * limit;

        // base of stack
        void * base;

        // Information for exception handling.
        struct exception_context_t exception_context;
    };

    struct __stack_info_t {
        // pointer to stack
        struct __stack_t * storage;
    };
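    // Relation between these fields and the diagram above (a sketch only; the exact
    // guard-page and alignment adjustments are assumptions, not taken from this file):
    //   storage        points at the __stack_t descriptor stored at the high end of the allocation
    //   storage->base  is the address the program stack grows down from
    //   storage->limit is the lowest usable address, just above any optional guard page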

    enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting };

    struct coroutine$ {
        // context that is switched during a __cfactx_switch
        struct __stack_context_t context;

        // stack information of the coroutine
        struct __stack_info_t stack;

        // textual name for coroutine/task
        const char * name;

        // current execution status for coroutine
        enum __Coroutine_State state;

        // first coroutine to resume this one
        struct coroutine$ * starter;

        // last coroutine to resume this one
        struct coroutine$ * last;

        // If non-null, the stack must be unwound with this exception
        struct _Unwind_Exception * cancellation;

    };
    // Wrapper for gdb
    struct cfathread_coroutine_t { struct coroutine$ debug; };

    static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) {
        return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
    }
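    // Note on the mask above: (uintptr_t)-2 is all-ones except the lowest bit, so the
    // cast-and-AND clears bit 0 of stack.storage. Presumably that bit is used as a flag
    // on the storage pointer (an assumption from this expression alone), and __get_stack
    // strips it to recover the real __stack_t address, e.g. 0x...5001 -> 0x...5000.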

    // struct describing which calls the monitor is currently accepting
    struct __waitfor_mask_t {
        // the index of the accepted function, -1 if none
        short * accepted;

        // list of acceptable functions, null if any
        __cfa_anonymous_object( __small_array_t(struct __acceptable_t) );
    };

    struct monitor$ {
        // spinlock to protect internal data
        struct __spinlock_t lock;

        // current owner of the monitor
        struct thread$ * owner;

        // queue of threads that are blocked waiting for the monitor
        __queue_t(struct thread$) entry_queue;

        // stack of conditions to run next once we exit the monitor
        __stack_t(struct __condition_criterion_t) signal_stack;

        // monitor routines can be called recursively, we need to keep track of that
        unsigned int recursion;

        // mask used to know if some thread is waiting for something while holding the monitor
        struct __waitfor_mask_t mask;

        // node used to signal the dtor in a waitfor dtor
        struct __condition_node_t * dtor_node;
    };
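    // A plausible reading of the fields above (an assumption from the comments, not
    // verified against the kernel sources): a thread entering a mutex routine takes
    // `lock`, then either claims `owner` (bumping `recursion` on nested entry) or blocks
    // on `entry_queue`; when the owner leaves, criteria pushed on `signal_stack` are
    // serviced before the next thread on `entry_queue` is granted ownership, and `mask`
    // records which calls a blocked waitfor is willing to accept.
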
    // Wrapper for gdb
    struct cfathread_monitor_t { struct monitor$ debug; };

    struct __monitor_group_t {
        // currently held monitors
        __cfa_anonymous_object( __small_array_t(monitor$*) );

        // last function that acquired monitors
        fptr_t func;
    };

    // Linked-list fields
    // intrusive link field for threads in the ready-queue
    struct __thread_desc_link {
        struct thread$ * next;
        volatile unsigned long long ts;
    };

    // Linked-list fields
    // intrusive link field for threads in the user_link/cltr_link
    struct __thread_user_link {
    #ifdef __cforall
        inline dlink(thread$);
    #else
        struct thread$ * next; struct thread$ * back;
    #endif
    };
    _Static_assert(sizeof(struct __thread_user_link) == 2 * sizeof(struct thread$ *), "__thread_user_link should be consistent in C and Cforall");
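    // The assert guards layout compatibility: this header is seen both by C and by
    // Cforall (see the #ifdef above), so the inline dlink(thread$) used in the Cforall
    // view must occupy exactly the two raw pointers used in the C view, otherwise
    // thread$ would have different sizes and field offsets in the two translations.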

    struct thread$ {
        // Core threading fields
        // context that is switched during a __cfactx_switch
        struct __stack_context_t context;

        // Linked-list fields
        // intrusive link field for threads
        struct __thread_desc_link rdy_link;

        // current execution status for coroutine
        // Possible values are:
        //   - TICKET_BLOCKED (-1) thread is blocked
        //   - TICKET_RUNNING ( 0) thread is running
        //   - TICKET_UNBLOCK ( 1) thread should ignore next block
        volatile int ticket;
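        // (Presumed use of the ticket, an assumption based on the values listed above:
        // it resolves the race between blocking and being woken, e.g. if an unblock
        // request arrives before the thread actually blocks, the ticket is left at
        // TICKET_UNBLOCK and the next attempt to block returns instead of sleeping.)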
        enum __Coroutine_State state:8;
        enum __Preemption_Reason preempted:8;

        bool corctx_flag;

        // SKULLDUGGERY: errno is not saved in the thread data structure because returnToKernel appears to be the only function to require saving and restoring it

        // pointer to the cluster on which the thread is running
        struct cluster * curr_cluster;

        // preferred ready-queue or CPU
        unsigned preferred;

        // coroutine body used to store context
        struct coroutine$ self_cor;

        // current active context
        struct coroutine$ * curr_cor;

        // monitor body used for mutual exclusion
        struct monitor$ self_mon;

        // pointer to monitor with sufficient lifetime for current monitors
        struct monitor$ * self_mon_p;

        // monitors currently held by this thread
        struct __monitor_group_t monitors;

        // intrusive link fields, used for locks, monitors and any user-defined data structure
        // default link fields for dlist
        struct __thread_user_link user_link;

        // secondary intrusive link fields, used for global cluster list
        // default link fields for dlist
        struct __thread_user_link cltr_link;

        struct processor * last_proc;

        // ptr used during handover between blocking lists to allow for stack allocation of intrusive nodes
        // main use case is wait-morphing to allow a different node to be used to block on condvar vs lock
        void * link_node;

        PRNG_STATE_T random_state; // fast random numbers

        #if defined( __CFA_WITH_VERIFY__ )
            struct processor * volatile executing;
            void * canary;
        #endif
    };

    // Wrapper for gdb
    struct cfathread_thread_t { struct thread$ debug; };

    #ifdef __CFA_DEBUG__
        void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]);
    #else
        #define __cfaabi_dbg_record_thrd(x, y, z)
    #endif

    #ifdef __cforall
    extern "Cforall" {
        static inline thread$ * volatile & ?`next ( thread$ * this ) {
            return this->user_link.next;
        }

        static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
            return this.user_link.next;
        }

        static inline tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) {
            dlink(thread$) & b = this.user_link;
            tytagref( dlink(thread$), dlink(thread$) ) result = { b };
            return result;
        }

        static inline tytagref(struct __thread_user_link, dlink(thread$)) ?`inner( struct thread$ & this ) {
            struct __thread_user_link & ib = this.cltr_link;
            dlink(thread$) & b = ib`inner;
            tytagref(struct __thread_user_link, dlink(thread$)) result = { b };
            return result;
        }

        P9_EMBEDDED(struct __thread_user_link, dlink(thread$))

        static inline void ?{}(__monitor_group_t & this) {
            (this.data){0p};
            (this.size){0};
            (this.func){NULL};
        }

        static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) {
            (this.data){data};
            (this.size){size};
            (this.func){func};
        }

        static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
            if( (lhs.data != 0) != (rhs.data != 0) ) return false;
            if( lhs.size != rhs.size ) return false;
            if( lhs.func != rhs.func ) return false;

            // Check that all the monitors match
            for( int i = 0; i < lhs.size; i++ ) {
                // if any monitor differs, the groups are not equal
                if( lhs[i] != rhs[i] ) return false;
            }

            return true;
        }
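        // Note on the first check above (an observation, not from the original comments):
        // comparing (data != 0) on both sides separates groups with no monitor array from
        // groups that have one, so a null-list group can only ever compare equal to
        // another null-list group.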

        static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) {
            lhs.data = rhs.data;
            lhs.size = rhs.size;
            lhs.func = rhs.func;
        }
    }
    #endif

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

    struct machine_context_t {
        void *SP;
        void *FP;
        void *PC;
    };

    // assembler routines that perform the context switch
    extern void __cfactx_invoke_stub( void );
    extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
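    // Hypothetical usage sketch (not taken from this file; `current` and `next` are
    // illustrative names): the scheduler would hand the CPU from one thread to another
    // with something like
    //   __cfactx_switch( &current->context, &next->context );
    // saving the caller's SP/FP into `from` and restoring them from `to`.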
    // void CtxStore ( void * this ) asm ("CtxStore");
    // void CtxRet   ( void * dst  ) asm ("CtxRet");

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)
#ifdef __cforall
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //