//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Nov 29 20:42:21 2022
// Update Count     : 56
//

#include "bits/containers.hfa"
#include "bits/defs.hfa"
#include "bits/locks.hfa"
#include "bits/random.hfa"
#include "kernel/fwd.hfa"

#ifdef __cforall
#include "containers/list.hfa"
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

	enum { DEFAULT_STACK_SIZE = 65000 };

	struct __cfaehm_try_resume_node;
	struct __cfaehm_base_exception_t;
	struct exception_context_t {
		struct __cfaehm_try_resume_node * top_resume;
		struct __cfaehm_base_exception_t * current_exception;
	};

	struct __stack_context_t {
		void * SP;
		void * FP;
	};

	// low addresses  :           +----------------------+ <- start of allocation
	//                            |  optional guard page |
	//                            +----------------------+ <- __stack_t.limit
	//                            |                      |
	//                            |      /\ /\ /\        |
	//                            |      || || ||        |
	//                            |                      |
	//                            |    program stack     |
	//                            |                      |
	// __stack_info_t.storage ->  +----------------------+ <- __stack_t.base
	//                            |      __stack_t       |
	// high addresses :           +----------------------+ <- end of allocation

	struct __stack_t {
		// stack grows towards stack limit
		void * limit;

		// base of stack
		void * base;

		// Information for exception handling.
		struct exception_context_t exception_context;
	};
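
	// A minimal sketch (illustrative, not part of this header) of how the two
	// fields recover the usable stack size; per the diagram above, the stack
	// grows downward from base towards limit. The helper name __stack_size is
	// hypothetical:
	//
	//     static inline size_t __stack_size( struct __stack_t * stack ) {
	//         return (size_t)((char *)stack->base - (char *)stack->limit);
	//     }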

	struct __stack_info_t {
		// pointer to stack
		struct __stack_t * storage;
	};

	enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting };

	struct coroutine$ {
		// context that is switched during a __cfactx_switch
		struct __stack_context_t context;

		// stack information of the coroutine
		struct __stack_info_t stack;

		// textual name for coroutine/task
		const char * name;

		// current execution status for coroutine
		enum __Coroutine_State state;

		// first coroutine to resume this one
		struct coroutine$ * starter;

		// last coroutine to resume this one
		struct coroutine$ * last;

		// if non-null, the stack must be unwound with this exception
		struct _Unwind_Exception * cancellation;
	};
	// Wrapper for gdb
	struct cfathread_coroutine_t { struct coroutine$ debug; };

	static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) {
		return (struct __stack_t *)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
	}
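
	// Note on the mask above: (uintptr_t)-2 is ~1, so the expression clears the
	// low-order bit of the storage pointer before it is used. A sketch of the
	// presumed tagging scheme (the flag's meaning, e.g. marking a user-supplied
	// stack, is an assumption here, not stated by this header):
	//
	//     storage = (struct __stack_t *)((uintptr_t)mem | 1);      // set tag bit
	//     bool tagged = ((uintptr_t)storage & 1) != 0;             // test tag bit
	//     stack   = (struct __stack_t *)((uintptr_t)storage & ~1); // strip tag bit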

	// describes which calls the monitor is accepting
	struct __waitfor_mask_t {
		// the index of the accepted function, -1 if none
		short * accepted;

		// list of acceptable functions, null if any
		__cfa_anonymous_object( __small_array_t(struct __acceptable_t) );
	};

	struct monitor$ {
		// spinlock to protect internal data
		struct __spinlock_t lock;

		// current owner of the monitor
		struct thread$ * owner;

		// queue of threads that are blocked waiting for the monitor
		__queue_t(struct thread$) entry_queue;

		// stack of conditions to run next once we exit the monitor
		__stack_t(struct __condition_criterion_t) signal_stack;

		// monitor routines can be called recursively; this tracks the nesting depth
		unsigned int recursion;

		// mask used to know if some thread is waiting for something while holding the monitor
		struct __waitfor_mask_t mask;

		// node used to signal the dtor in a waitfor dtor
		struct __condition_node_t * dtor_node;
	};
	// Wrapper for gdb
	struct cfathread_monitor_t { struct monitor$ debug; };
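
	// How the fields above cooperate, as an illustrative sketch only; the real
	// entry/exit protocol lives in the monitor implementation, not in this header:
	//
	//     enter( monitor$ * m, thread$ * t ):
	//         lock( m->lock )
	//         if   m->owner == 0 : m->owner = t; m->recursion = 1
	//         elif m->owner == t : m->recursion += 1            // recursive acquire
	//         else               : append t to m->entry_queue and block
	//         unlock( m->lock )
	//
	//     leave( monitor$ * m ):
	//         m->recursion -= 1
	//         if m->recursion == 0 : pass ownership to a signalled or queued thread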

	struct __monitor_group_t {
		// currently held monitors
		__cfa_anonymous_object( __small_array_t(monitor$ *) );

		// last function that acquired monitors
		fptr_t func;
	};

	// Linked-list fields
	// intrusive link field for threads in the ready-queue
	struct __thread_desc_link {
		struct thread$ * next;
		volatile unsigned long long ts;
	};

	// Linked-list fields
	// intrusive link field for threads in the user_link/cltr_link
	struct __thread_user_link {
		#ifdef __cforall
			inline dlink(thread$);
		#else
			struct thread$ * next; struct thread$ * back;
		#endif
	};
	_Static_assert(sizeof(struct __thread_user_link) == 2 * sizeof(struct thread$ *), "__thread_user_link should be consistent in C and Cforall");

	struct thread$ {
		// Core threading fields
		// context that is switched during a __cfactx_switch
		struct __stack_context_t context;

		// Linked-list fields
		// intrusive link field for threads
		struct __thread_desc_link rdy_link;

		// park/unpark ticket; possible values are:
		// - TICKET_BLOCKED (-1) thread is blocked
		// - TICKET_RUNNING ( 0) thread is running
		// - TICKET_UNBLOCK ( 1) thread should ignore next block
		volatile int ticket;

		// current execution status for coroutine
		enum __Coroutine_State state:8;
		enum __Preemption_Reason preempted:8;

		bool corctx_flag;

		// SKULLDUGGERY: errno is not saved in the thread data structure because returnToKernel
		// appears to be the only function that requires saving and restoring it

		// pointer to the cluster on which the thread is running
		struct cluster * curr_cluster;

		// preferred ready-queue or CPU
		unsigned preferred;

		// coroutine body used to store context
		struct coroutine$ self_cor;

		// current active context
		struct coroutine$ * curr_cor;

		// monitor body used for mutual exclusion
		struct monitor$ self_mon;

		// pointer to monitor with sufficient lifetime for current monitors
		struct monitor$ * self_mon_p;

		// monitors currently held by this thread
		struct __monitor_group_t monitors;

		// intrusive link fields, used for locks, monitors and any user-defined data structure
		// default link fields for dlist
		struct __thread_user_link user_link;

		// secondary intrusive link fields, used for the global cluster list
		// default link fields for dlist
		struct __thread_user_link cltr_link;

		// used to point to this thread's current clh node
		volatile bool * clh_node;

		struct processor * last_proc;

		PRNG_STATE_T random_state;              // fast random numbers

		#if defined( __CFA_WITH_VERIFY__ )
			struct processor * volatile executing;
			void * canary;
		#endif
	};

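	// A sketch of how the ticket arbitrates a park/unpark race; this shows only
	// the invariant the three values encode, not the kernel's actual code:
	//
	//     park( thread$ * t ):                        // called by t itself
	//         if fetch_and_add( &t->ticket, -1 ) == TICKET_UNBLOCK :
	//             return                              // unpark already happened; skip blocking
	//         block t
	//
	//     unpark( thread$ * t ):
	//         if fetch_and_add( &t->ticket, +1 ) == TICKET_BLOCKED :
	//             make t ready                        // t already blocked; wake it
	//         // otherwise t was running and will see TICKET_UNBLOCK at its next park
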
	// Wrapper for gdb
	struct cfathread_thread_t { struct thread$ debug; };

	#ifdef __CFA_DEBUG__
		void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]);
	#else
		#define __cfaabi_dbg_record_thrd(x, y, z)
	#endif

	#ifdef __cforall
	extern "Cforall" {
		static inline thread$ * volatile & ?`next ( thread$ * this ) {
			return this->user_link.next;
		}

		static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
			return this.user_link.next;
		}

		static inline tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) {
			dlink(thread$) & b = this.user_link;
			tytagref( dlink(thread$), dlink(thread$) ) result = { b };
			return result;
		}

		static inline tytagref( struct __thread_user_link, dlink(thread$) ) ?`inner( struct thread$ & this ) {
			struct __thread_user_link & ib = this.cltr_link;
			dlink(thread$) & b = ib`inner;
			tytagref( struct __thread_user_link, dlink(thread$) ) result = { b };
			return result;
		}

		P9_EMBEDDED(struct __thread_user_link, dlink(thread$))

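		// The ?`inner conversions and P9_EMBEDDED above are what let the intrusive
		// list type from containers/list.hfa reach the embedded link fields, so a
		// plain dlist(thread$) links threads through user_link. A usage sketch;
		// the operation names are recalled from list.hfa and should be verified
		// against that header:
		//
		//     dlist(thread$) waiters;
		//     insert_last( waiters, *t );             // link t in via user_link
		//     thread$ & head = try_pop_front( waiters );
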
		static inline void ?{}(__monitor_group_t & this) {
			(this.data){0p};
			(this.size){0};
			(this.func){NULL};
		}

		static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) {
			(this.data){data};
			(this.size){size};
			(this.func){func};
		}

		static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) __attribute__((const)) {
			if( (lhs.data != 0) != (rhs.data != 0) ) return false;
			if( lhs.size != rhs.size ) return false;
			if( lhs.func != rhs.func ) return false;

			// check that all the monitors match
			for( int i = 0; i < lhs.size; i++ ) {
				// if any monitor differs, the groups are not equal
				if( lhs[i] != rhs[i] ) return false;
			}

			return true;
		}

		static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) {
			lhs.data = rhs.data;
			lhs.size = rhs.size;
			lhs.func = rhs.func;
		}
	}
	#endif

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

	struct machine_context_t {
		void * SP;
		void * FP;
		void * PC;
	};

	// assembler routines that perform the context switch
	extern void __cfactx_invoke_stub( void );
	extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
	// void CtxStore ( void * this ) asm ("CtxStore");
	// void CtxRet   ( void * dst )  asm ("CtxRet");
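
	// An illustrative call site (a sketch, not taken from the kernel): the switch
	// saves the current registers into `from` and restores `to`, resuming wherever
	// `to` last called __cfactx_switch:
	//
	//     __cfactx_switch( &from->context, &to->context );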

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)
#ifdef __cforall
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //