source: libcfa/src/concurrency/coroutine.cfa@ d683c2c

Last change on this file since d683c2c was d683c2c, checked in by Peter A. Buhr <pabuhr@…>, 27 hours ago

adjust name for ARM architecture

  • Property mode set to 100644
File size: 15.5 KB
Line 
1//
2// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
3//
4// The contents of this file are covered under the licence agreement in the
5// file "LICENCE" distributed with Cforall.
6//
7// coroutine.c --
8//
9// Author : Thierry Delisle
10// Created On : Mon Nov 28 12:27:26 2016
11// Last Modified By : Peter A. Buhr
12// Last Modified On : Sun Mar 1 20:35:45 2026
13// Update Count : 116
14//
15
16#define __cforall_thread__
17
18#include "coroutine.hfa"
19
20#include <stddef.h>
21#include <malloc.h>
22#include <errno.h>
23#include <string.h>
24#include <unistd.h>
25#include <sys/mman.h> // mprotect
26#include <unwind.h>
27
28#include "kernel/private.hfa"
29#include "exception.hfa"
30#include "exception.h"
31#include "math.hfa"
32
33#define CFA_COROUTINE_USE_MMAP 0
34
35#define __CFA_INVOKE_PRIVATE__
36#include "invoke.h"
37
extern "C" {
	// Forced-unwind entry for a cancelled coroutine's stack; defined elsewhere
	// (presumably invoke.c) — never returns, the unwinder takes over.
	void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__));
	// Cleanup hook installed in the runtime's forced-unwind exception (see
	// ^?{}(coroutine$ &)).  Reaching it means the unwind machinery tried to
	// destroy that exception unexpectedly, which is a fatal internal error.
	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
		abort();
	}

	// Assembly routine that returns into a saved stack context; never returns here.
	extern void CtxRet( struct __stack_context_t * to ) asm ("CtxRet") __attribute__ ((__noreturn__));
}
47
//-----------------------------------------------------------------------------
// Shallow, field-by-field copy of one CoroutineCancelled exception into another.
forall(T &)
void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
	// The three assignments are independent; order is immaterial.
	dst->the_exception = src->the_exception;
	dst->the_coroutine = src->the_coroutine;
	dst->virtual_table = src->virtual_table;
}
55
// Generic human-readable name for the CoroutineCancelled exception family.
forall(T &)
const char * msg(CoroutineCancelled(T) *) libcfa_public {
	const char * text = "CoroutineCancelled(...)";
	return text;
}
60
// This code should not be inlined. It is the error path on resume.
// Runs on the resumer's stack after the cancelled coroutine has been unwound:
// wraps the original exception in a CoroutineCancelled and resume-throws it,
// then releases the cancellation state.
forall(T & | is_coroutine(T))
void __cfaehm_cancelled_coroutine(
		T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public {
	verify( desc->cancellation );
	desc->state = Cancelled;
	// The original exception carried by the cancelled coroutine.
	exception_t * except = __cfaehm_cancellation_exception( desc->cancellation );

	// TODO: Remove explicit vtable set once trac#186 is fixed.
	// NOTE: Cforall permits two variables named 'except' with different types in
	// the same scope; each later use resolves by the type expected in context.
	CoroutineCancelled(T) except;
	except.virtual_table = &_default_vtable;
	except.the_coroutine = &cor;
	except.the_exception = except;	// RHS resolves to the exception_t * above
	// Why does this need a cast?
	throwResume (CoroutineCancelled(T) &)except;

	// Delivery complete: free the wrapped exception and the cancellation record.
	except->virtual_table->free( except );
	free( desc->cancellation );
	desc->cancellation = 0p;
}
81
// helper for popping from coroutine's ehm buffer
static nonlocal_exception * pop_ehm_head( coroutine$ * this ) {
	// Hold the buffer lock only long enough to detach the head node.
	lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
	nonlocal_exception * head = pop_head( this->ehm_state.ehm_buffer );
	unlock( this->ehm_state.buffer_lock );
	return head;
}
89
//-----------------------------------------------------------------------------
// Global state variables

// minimum feasible stack size in bytes
static const size_t MinStackSize = 1000;
extern size_t __page_size; // architecture pagesize HACK, should go in proper runtime singleton
extern int __map_prot; // protection flags used to restore access on guard pages (see __stack_clean)

// Forward declarations: stack creation is public (called from ctor paths),
// cleanup is private to this translation unit.
void __stack_prepare( __stack_info_t * this, size_t create_size );
static void __stack_clean( __stack_info_t * this );
100
//-----------------------------------------------------------------------------
// Coroutine ctors and dtors

// Construct stack bookkeeping.  storage == 0p with storageSize == 0 defers
// allocation until first use; otherwise the stack is created eagerly.
void ?{}( __stack_info_t & this, void * storage, size_t storageSize ) {
	this.storage = (__stack_t *)storage;

	// Did we get a piece of storage ?
	if ( this.storage || storageSize != 0 ) {
		// We either got a piece of storage or the user asked for a specific size
		// Immediately create the stack
		// (This is slightly unintuitive that non-default sized coroutines create are eagerly created
		// but it avoids that all coroutines carry an unnecessary size)
		// NOTE(review): user-supplied storage must come with a non-zero size,
		// otherwise this debug-mode check fires.
		verify( storageSize != 0 );
		__stack_prepare( &this, storageSize );
	}
}
116
// Destroy stack bookkeeping; only stacks the runtime allocated are released.
void ^?{}(__stack_info_t & this) {
	// The low-order bit of the storage pointer tags a user-provided stack,
	// which the runtime does not own and therefore must not free.
	bool runtimeOwned = ((intptr_t)this.storage & 0x1) == 0;
	if ( runtimeOwned && this.storage ) {
		__stack_clean( &this );
	}
}
123
// Construct a coroutine descriptor; the stack is built from storage/storageSize
// (or lazily when both are empty — see ?{}(__stack_info_t &, ...)).
void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
	(this.context){0p, 0p};
	(this.stack){storage, storageSize};
	this.name = name;
	state = Start;		// unqualified fields resolve through with( this )
	starter = 0p;		// first coroutine ever to resume this one
	this.last = 0p;		// most recent resumer
	cancellation = 0p;	// non-0p only while being force-unwound
	ehm_state.ehm_buffer{};
	ehm_state.buffer_lock{};
	ehm_state.ehm_enabled = false;	// non-local exception delivery off by default
}
136
// Destroy a coroutine descriptor: drain (and discard) any pending non-local
// exceptions, then, if the coroutine is suspended mid-execution, force-unwind
// its stack by resuming it with a cancellation installed.
void ^?{}( coroutine$ & this ) libcfa_public {
	// handle any leftover pending non-local exceptions
	nonlocal_exception * nl_ex = pop_ehm_head( &this );
	unsigned unhandled_ex = 0;

	// if any leftover exceptions handle
	for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( &this ) ) {
		unhandled_ex++;
		free( nl_ex->the_exception );
		free( nl_ex );
	}

	#ifdef __CFA_DEBUG__
	if ( unhandled_ex > 0 )
		printf( "Warning: Coroutine %p exited with %u pending nonlocal exceptions.\n", &this, unhandled_ex );
	#endif

	// Only a coroutine that has started and not yet finished needs unwinding.
	if ( this.state != Halted && this.state != Start && this.state != Primed ) {
		coroutine$ * src = active_coroutine();
		coroutine$ * dst = &this;

		// Stack-allocated forced-unwind exception; safe because the context
		// switch below only returns after the unwind completes.
		// NOTE(review): exception_class -1 appears to mark this as the runtime's
		// cancellation rather than a user exception — confirm against invoke.c.
		struct _Unwind_Exception storage;
		storage.exception_class = -1;
		storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
		this.cancellation = &storage;
		this.last = src;

		// a coroutine cannot force-unwind itself
		if ( src == dst ) {
			abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
		}

		// Resume the dying coroutine so its stack unwinds; control returns here
		// once it halts.
		$ctx_switch( src, dst );
	}
}
172
// Part of the Public API
// Not inline since only ever called once per coroutine
// Prime a never-resumed coroutine: the first resume returns immediately from
// inside the coroutine context (see the Primed check in __cfactx_cor_finish),
// so the next resume proceeds into the coroutine main.
forall( T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); } )
void prime( T & cor ) libcfa_public {
	coroutine$ * this = get_coroutine(cor);
	assert( this->state == Start );	// may only prime before the first resume

	this->state = Primed;
	resume( cor );
}
183
// Allocate a coroutine stack of at least storageSize bytes and return the
// usable base address plus the rounded-up size as a [storage, size] tuple.
static [void *, size_t] __stack_alloc( size_t storageSize ) {
	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
	assert( __page_size != 0l );
	// Reserve room for the trailing __stack_t bookkeeping and round to pages.
	size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
	size = ceiling( size, __page_size );

	// If we are running debug, we also need to allocate a guardpage to catch stack overflows.
	void * storage;
	#if CFA_COROUTINE_USE_MMAP
	// mmap path: one extra page below the stack is made inaccessible as a guard.
	storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
	if (storage == ((void*)-1)) {
		abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
	}
	if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
		abort( "coroutine stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
	} // if
	// Skip over the guard page; callers only ever see the accessible region.
	storage = (void *)(((intptr_t)storage) + __page_size);
	#else
	__cfaabi_dbg_debug_do(
		// Debug build: page-aligned allocation with an extra page for a guard.
		storage = memalign( __page_size, size + __page_size );
	);
	__cfaabi_dbg_no_debug_do(
		// NOTE(review): malloc result is not checked for 0p here; a failed
		// allocation would fault on first use — confirm this is intended.
		storage = (void*)malloc(size);
	);

	__cfaabi_dbg_debug_do(
		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
			abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
		}
		storage = (void *)(((intptr_t)storage) + __page_size);
	);
	#endif
	__cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);

	verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
	return [storage, size];
}
221
// Release a runtime-allocated coroutine stack (inverse of __stack_alloc).
static void __stack_clean ( __stack_info_t * this ) {
	// limit is the low address of the usable stack region.
	void * storage = this->storage->limit;

	#if CFA_COROUTINE_USE_MMAP
	// Recompute the usable size from the recorded bounds plus the bookkeeping.
	size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
	// Step back over the guard page so the entire original mapping is unmapped.
	storage = (void *)(((intptr_t)storage) - __page_size);
	if (munmap(storage, size + __page_size) == -1) {
		abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
	}
	#else
	__cfaabi_dbg_debug_do(
		// Debug build: restore access on the guard page before freeing.
		storage = (char*)(storage) - __page_size;
		if ( mprotect( storage, __page_size, __map_prot ) == -1 ) {
			abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
		}
	);

	free( storage );
	#endif
	// NOTE(review): logs the pointer value after the region is freed/unmapped —
	// value only, never dereferenced.
	__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
}
243
// Initialise this->storage for use as a coroutine stack: allocate a fresh
// stack when none was supplied, otherwise carve the bookkeeping out of the
// user-provided buffer.  User ownership is tagged in the pointer's low bit.
void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
	bool userStack;
	void * storage;
	size_t size;
	if ( ! this->storage ) {
		userStack = false;
		[storage, size] = __stack_alloc( create_size );
	} else {
		userStack = true;
		__cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%zd bytes)\n", this, this->storage, (intptr_t)this->storage->limit - (intptr_t)this->storage->base);

		// The stack must be aligned, advance the pointer to the next align data
		storage = (void*)libCeiling( (intptr_t)this->storage, libAlign());

		// The size needs to be shrinked to fit all the extra data structure and be aligned
		ptrdiff_t diff = (intptr_t)storage - (intptr_t)this->storage;
		size = libFloor(create_size - stack_data_size - diff, libAlign());
	} // if
	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );

	// The __stack_t bookkeeping lives at the top (highest addresses) of the region;
	// limit is the low bound, base the high bound where frames start growing down.
	this->storage = (__stack_t *)((intptr_t)storage + size - sizeof(__stack_t));
	this->storage->limit = storage;
	this->storage->base = (void*)((intptr_t)storage + size - sizeof(__stack_t));
	this->storage->exception_context.top_resume = 0p;
	this->storage->exception_context.current_exception = 0p;
	// Record user ownership in the pointer's low bit (read by ^?{}(__stack_info_t &)).
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*)&this->storage;
	*istorage |= userStack ? 0x1 : 0x0;
}
273
// We need to call suspend from invoke.c, so we expose this wrapper that
// is not inline (We can't inline Cforall in C)
extern "C" {
	// Final switch out of a finished (or cancelled) coroutine: mark it Halted
	// and transfer control back to the coroutine that should continue.
	void __cfactx_cor_leave( struct coroutine$ * src ) {
		// Under cancellation return to the most recent resumer; on normal
		// completion return to the original starter.
		coroutine$ * starter = src->cancellation != 0 ? src->last : src->starter;

		src->state = Halted;

		assertf( starter != 0,
			"Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
			"Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
			src->name, src );
		assertf( starter->state != Halted,
			"Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
			"Possible cause is terminated coroutine's main routine has already returned.",
			src->name, src, starter->name, starter );

		$ctx_switch( src, starter );
	}

	// Called (from invoke.c, per the comment above) once a coroutine's context
	// is entered: clears the in-switch flag and honours priming by suspending
	// immediately, before any of the coroutine main runs.
	struct coroutine$ * __cfactx_cor_finish(void) {
		struct coroutine$ * cor = active_coroutine();

		// get the active thread once
		thread$ * athrd = active_thread();

		/* paranoid */ verify( athrd->corctx_flag );
		athrd->corctx_flag = false;

		// A primed coroutine bounces straight back to its resumer (see prime()).
		if (cor->state == Primed) {
			__cfactx_suspend();
		}

		cor->state = Active;

		return cor;
	}
}
312
313
////////////////////////////////////////////////////////////////////////////////////////////////////
// non local ehm routines

// Fallback when a resumption exception delivered by poll() finds no handler:
// escalate it through the termination path.
void defaultResumeAtHandler( exception_t * except ) {
	// NOTE(review): presumably installs a copy of the exception as the current
	// unwind payload before starting termination — confirm against exception.cfa.
	__cfaehm_allocate_exception( except );
	__cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler );
}
321
// RAII holder: owns a heap-allocated exception and frees it when the holder
// leaves scope, even if a resumption handler unwinds past the caller.
struct ehm_cleanup {
	exception_t * ex;	// exception owned by this holder; freed in the dtor
};

void ^?{}( ehm_cleanup & this ) { free( this.ex ); }
327
// Return cor's current stack pointer: read the hardware SP when querying the
// running coroutine, otherwise the SP saved in its switched-out context.
void * stack_pointer( coroutine$ * cor ) libcfa_public {
	if ( active_coroutine() == cor ) { // accessing myself ?
		void * sp; // use my current stack value
		#if defined( __i386__ )
		asm( "movl %%esp,%0" : "=m" (sp) : );
		#elif defined( __x86_64__ )
		asm( "movq %%rsp,%0" : "=m" (sp) : );
		#elif defined( __aarch64__ )
		// AArch64 cannot store SP directly to memory; stage through scratch x9.
		asm( "mov x9, sp; str x9,%0" : "=m" (sp) : : "x9" );
		#else
		#error Cforall : internal error, unsupported architecture
		#endif
		return sp;
	} else { // accessing another coroutine
		return cor->context.SP;
	} // if
} // stackPointer
345
// Convenience overload: stack pointer of the currently running coroutine.
void * stack_pointer() libcfa_public {
	return stack_pointer( active_coroutine() );
}
347
#define xstr(s) str(s)	// expand s before stringizing
#define str(s) #s
#define STACK_ERROR 4	// abort when within this many KiB of the stack limit
#define STACK_WARNING 16	// warn when within this many KiB of the stack limit
352
// Sanity-check cor's stack pointer against its stack bounds: abort on
// overflow/underflow, print a warning when dangerously close to the limit.
void stack_verify( coroutine$ * cor ) libcfa_public {
	void * sp = stack_pointer( cor ); // optimizations
	struct __stack_t * cor_stack = __get_stack( cor );
	void * safelimit = (void *)((char *)cor_stack->limit + STACK_ERROR * 1024); // space needed for printing abort message and backtrace

	if ( sp < safelimit ) { // must leave stack space to call abort
		abort( "Stack overflow detected: stack pointer %p below safe limit %p.\n"
			"Possible cause is allocation of large stack frame(s) and/or deep call stack.",
			sp, safelimit );
	} else if ( sp < (void *)((char *)cor_stack->limit + STACK_WARNING * 1024) ) { // must leave stack space to call abort
		// Not fatal yet: emit a one-line warning through the low-level writer.
		#define STACK_WARNING_MSG "Cforall Runtime warning : within " xstr(STACK_WARNING) "K of stack limit.\n"
		__cfaabi_bits_write( STDERR_FILENO, STACK_WARNING_MSG, sizeof( STACK_WARNING_MSG ) - 1 );
	} else if ( sp > cor_stack->base ) {
		// SP above base means frames walked off the high end of the region.
		abort( "Stack underflow detected: stack pointer %p above base %p.\n"
			"Possible cause is corrupted stack frame via overwriting memory.",
			sp, cor_stack->base );
	} // if
} // verify
371
// Convenience overload: verify the currently running coroutine's stack.
void stack_verify() libcfa_public {
	stack_verify( active_coroutine() );
}
373
// Deliver every pending non-local exception queued on cor via throwResume.
// Returns true when at least one exception was delivered, false if none pending.
bool poll( coroutine$ * cor ) libcfa_public {
	nonlocal_exception * nl_ex = pop_ehm_head( cor );

	// if no exceptions return false
	if ( nl_ex == 0p ) return false;

	// otherwise loop and throwResume all pending exceptions
	for ( ; nl_ex != 0p; nl_ex = pop_ehm_head( cor ) ) {
		// ex_holder frees the exception at scope exit, even if the resumption
		// handler unwinds out of this iteration (see ehm_cleanup).
		ehm_cleanup ex_holder{ nl_ex->the_exception };
		free( nl_ex );	// queue node is no longer needed once detached
		__cfaehm_throw_resume( ex_holder.ex , defaultResumeAtHandler );
	}

	return true;
}
389
// Descriptor-level conveniences operating on the currently running coroutine.
bool poll() libcfa_public { return poll( active_coroutine() ); }
void enable_ehm() libcfa_public { active_coroutine()->ehm_state.ehm_enabled = true; }
void disable_ehm() libcfa_public { active_coroutine()->ehm_state.ehm_enabled = false; }
bool checked_poll() libcfa_public { return active_coroutine()->ehm_state.ehm_enabled ? poll( active_coroutine() ) : false; } // poll only when delivery is enabled
coroutine$ * resumer() libcfa_public { return active_coroutine()->last; } // most recent resumer
coroutine$ * first_resumer() libcfa_public { return active_coroutine()->starter; } // first-ever resumer
396
// user facing ehm operations
// Type-safe wrappers: map a user coroutine object T to its coroutine$
// descriptor and delegate to the descriptor-level operations above.
forall(T & | is_coroutine(T)) {
	void * stack_pointer( T & cor ) libcfa_public { return stack_pointer( get_coroutine( cor ) ); }
	void stack_verify( T & cor ) libcfa_public { return stack_verify( get_coroutine( cor ) ); }

	// enable/disable non-local exceptions
	void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; }
	void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; }

	// poll for non-local exceptions
	bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); }

	// poll iff nonlocal ehm is enabled
	bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? poll( cor ) : false; }

	coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; }		// most recent resumer
	coroutine$ * first_resumer( T & cor ) libcfa_public { return get_coroutine( cor )->starter; }	// first-ever resumer
}
415
// resume non local exception at receiver (i.e. enqueue in ehm buffer)
// The exception is copied to the heap; ownership passes to the receiver, which
// frees it on delivery (poll) or in its destructor if never polled.
forall(exceptT *, T & | ehm_resume_at( exceptT, T ))
void resumeAt( T & receiver, exceptT & ex ) libcfa_public {
	coroutine$ * cor = get_coroutine( receiver );
	nonlocal_exception * nl_ex = alloc();
	exceptT * ex_copy = alloc();
	// NOTE(review): sizeof(exceptT) shallow copy — an exception holding
	// pointers into the sender's stack would not survive delivery; confirm.
	memcpy( ex_copy, &ex, sizeof(exceptT) );
	(*nl_ex){ (exception_t *)ex_copy };
	// Publish under the receiver's buffer lock.
	lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
	append( cor->ehm_state.ehm_buffer, nl_ex );
	unlock( cor->ehm_state.buffer_lock );
}
428
// Descriptor-level overload of resumeAt: same heap-copy-and-enqueue protocol
// as the typed overload above, addressed directly to a coroutine$.
forall(exceptT * | { void $throwResume(exceptT &); })
void resumeAt( coroutine$ * receiver, exceptT & ex ) libcfa_public {
	nonlocal_exception * nl_ex = alloc();
	exceptT * ex_copy = alloc();
	// NOTE(review): shallow copy, same caveat as the typed resumeAt overload.
	memcpy( ex_copy, &ex, sizeof(exceptT) );
	(*nl_ex){ (exception_t *)ex_copy };
	// Publish under the receiver's buffer lock.
	lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
	append( receiver->ehm_state.ehm_buffer, nl_ex );
	unlock( receiver->ehm_state.buffer_lock );
}
439
440// Local Variables: //
441// mode: c //
442// tab-width: 4 //
443// End: //
Note: See TracBrowser for help on using the repository browser.