source: libcfa/src/concurrency/coroutine.cfa @ 74c6748

Last change on this file since 74c6748 was 3318dff, checked in by caparsons <caparson@…>, 15 months ago

fixed non-local ehm issue and added no arg resumer routine

//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// coroutine.cfa --
//
// Author           : Thierry Delisle
// Created On       : Mon Nov 28 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Thu Feb 16 15:34:46 2023
// Update Count     : 24
//

#define __cforall_thread__

#include "coroutine.hfa"

#include <stddef.h>
#include <malloc.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>                                   // mprotect
#include <unwind.h>

#include "kernel/private.hfa"
#include "exception.hfa"
#include "exception.h"
#include "math.hfa"

#define CFA_COROUTINE_USE_MMAP 0

#define __CFA_INVOKE_PRIVATE__
#include "invoke.h"

extern "C" {
	void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__));
	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
	static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
		abort();
	}

	extern void CtxRet( struct __stack_context_t * to ) asm ("CtxRet") __attribute__ ((__noreturn__));
}

//-----------------------------------------------------------------------------
forall(T &)
void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
	dst->virtual_table = src->virtual_table;
	dst->the_coroutine = src->the_coroutine;
	dst->the_exception = src->the_exception;
}

forall(T &)
const char * msg(CoroutineCancelled(T) *) libcfa_public {
	return "CoroutineCancelled(...)";
}

// This code should not be inlined. It is the error path on resume.
forall(T & | is_coroutine(T))
void __cfaehm_cancelled_coroutine(
		T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public {
	verify( desc->cancellation );
	desc->state = Cancelled;
	exception_t * except = __cfaehm_cancellation_exception( desc->cancellation );

	// TODO: Remove explicit vtable set once trac#186 is fixed.
	CoroutineCancelled(T) except;
	except.virtual_table = &_default_vtable;
	except.the_coroutine = &cor;
	except.the_exception = except;
	// Why does this need a cast?
	throwResume (CoroutineCancelled(T) &)except;

	except->virtual_table->free( except );
	free( desc->cancellation );
	desc->cancellation = 0p;
}
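
// Usage sketch (comment only, not compiled): when a coroutine's main is cancelled, the
// resumer receives the CoroutineCancelled(T) resumption exception built above.  The
// coroutine type "Cor" below is hypothetical; the general shape of the handler is:
//
//   Cor c;
//   try {
//       resume( c );
//   } catchResume( CoroutineCancelled(Cor) * e ) {
//       // e->the_coroutine is the cancelled coroutine,
//       // e->the_exception is the exception that caused the cancellation.
//   }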

//-----------------------------------------------------------------------------
// Global state variables

// minimum feasible stack size in bytes
static const size_t MinStackSize = 1000;
extern size_t __page_size;                              // architecture pagesize HACK, should go in proper runtime singleton
extern int __map_prot;

void __stack_prepare( __stack_info_t * this, size_t create_size );
static void __stack_clean  ( __stack_info_t * this );

//-----------------------------------------------------------------------------
// Coroutine ctors and dtors
void ?{}( __stack_info_t & this, void * storage, size_t storageSize ) {
	this.storage   = (__stack_t *)storage;

	// Did we get a piece of storage ?
	if (this.storage || storageSize != 0) {
		// We either got a piece of storage or the user asked for a specific size
		// Immediately create the stack
		// (It is slightly unintuitive that non-default-sized coroutine stacks are created eagerly,
		// but it avoids every coroutine carrying an unnecessary size field)
		verify( storageSize != 0 );
		__stack_prepare( &this, storageSize );
	}
}
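
// Illustrative construction paths (sizes/buffers below are hypothetical):
//   (stack){ 0p, 0p };            // no storage, no size: stack creation is deferred (lazy)
//   (stack){ 0p, 64 * 1024 };     // explicit size: a 64 KiB stack is allocated eagerly here
//   (stack){ buf, sizeof(buf) };  // user-supplied storage: prepared eagerly, never freed by ^?{}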

void ^?{}(__stack_info_t & this) {
	bool userStack = ((intptr_t)this.storage & 0x1) != 0;
	if ( ! userStack && this.storage ) {
		__stack_clean( &this );
	}
}

void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
	(this.context){0p, 0p};
	(this.stack){storage, storageSize};
	this.name = name;
	state = Start;
	starter = 0p;
	last = 0p;
	cancellation = 0p;
	ehm_state.ehm_buffer{};
	ehm_state.buffer_lock{};
	ehm_state.ehm_enabled = false;
}

void ^?{}(coroutine$& this) libcfa_public {
	if(this.state != Halted && this.state != Start && this.state != Primed) {
		coroutine$ * src = active_coroutine();
		coroutine$ * dst = &this;

		struct _Unwind_Exception storage;
		storage.exception_class = -1;
		storage.exception_cleanup = _CtxCoroutine_UnwindCleanup;
		this.cancellation = &storage;
		this.last = src;

		// not resuming self ?
		if ( src == dst ) {
			abort( "Attempt by coroutine %.256s (%p) to terminate itself.\n", src->name, src );
		}

		$ctx_switch( src, dst );
	}
}
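
// In other words, destroying a coroutine whose main has not yet finished forces one last
// context switch into it with a pending cancellation, so its stack unwinds (running any
// pending destructors) before the coroutine object itself is destroyed.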

// Part of the Public API
// Not inline since only ever called once per coroutine
forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
void prime(T& cor) libcfa_public {
	coroutine$* this = get_coroutine(cor);
	assert(this->state == Start);

	this->state = Primed;
	resume(cor);
}
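
// Illustrative call sequence for priming (the coroutine type "Cor" is hypothetical):
//   Cor c;
//   prime( c );   // starts c's context; because the state is Primed, __cfactx_cor_finish
//                 // suspends back immediately, before c's main body runs
//   resume( c );  // this resume (and later ones) executes the body of main( Cor & )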

static [void *, size_t] __stack_alloc( size_t storageSize ) {
	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
	assert(__page_size != 0l);
	size_t size = libCeiling( storageSize, 16 ) + stack_data_size;
	size = ceiling(size, __page_size);

	// If we are running debug, we also need to allocate a guardpage to catch stack overflows.
	void * storage;
	#if CFA_COROUTINE_USE_MMAP
		storage = mmap(0p, size + __page_size, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
		if(storage == ((void*)-1)) {
			abort( "coroutine stack creation : internal error, mmap failure, error(%d) %s.", errno, strerror( errno ) );
		}
		if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
			abort( "coroutine stack creation : internal error, mprotect failure, error(%d) %s.", errno, strerror( errno ) );
		} // if
		storage = (void *)(((intptr_t)storage) + __page_size);
	#else
		__cfaabi_dbg_debug_do(
			storage = memalign( __page_size, size + __page_size );
		);
		__cfaabi_dbg_no_debug_do(
			storage = (void*)malloc(size);
		);

		__cfaabi_dbg_debug_do(
			if ( mprotect( storage, __page_size, PROT_NONE ) == -1 ) {
				abort( "__stack_alloc : internal error, mprotect failure, error(%d) %s.", (int)errno, strerror( (int)errno ) );
			}
			storage = (void *)(((intptr_t)storage) + __page_size);
		);
	#endif
	__cfaabi_dbg_print_safe("Kernel : Created stack %p of size %zu\n", storage, size);

	verify( ((intptr_t)storage & (libAlign() - 1)) == 0ul );
	return [storage, size];
}
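
// Worked size example (assuming sizeof(__stack_t) == 48 and __page_size == 4096):
//   storageSize = 60000
//   size = libCeiling(60000, 16) + libCeiling(48, 16) = 60000 + 48 = 60048
//   size = ceiling(60048, 4096) = 61440 (15 pages)
// Debug (and mmap) builds then allocate one extra page in front and mprotect it PROT_NONE
// as a guard page, returning the address just past that page.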

static void __stack_clean  ( __stack_info_t * this ) {
	void * storage = this->storage->limit;

	#if CFA_COROUTINE_USE_MMAP
		size_t size = ((intptr_t)this->storage->base) - ((intptr_t)this->storage->limit) + sizeof(__stack_t);
		storage = (void *)(((intptr_t)storage) - __page_size);
		if(munmap(storage, size + __page_size) == -1) {
			abort( "coroutine stack destruction : internal error, munmap failure, error(%d) %s.", errno, strerror( errno ) );
		}
	#else
		__cfaabi_dbg_debug_do(
			storage = (char*)(storage) - __page_size;
			if ( mprotect( storage, __page_size, __map_prot ) == -1 ) {
				abort( "(coStack_t *)%p.^?{}() : internal error, mprotect failure, error(%d) %s.", &this, errno, strerror( errno ) );
			}
		);

		free( storage );
	#endif
	__cfaabi_dbg_print_safe("Kernel : Deleting stack %p\n", storage);
}

void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
	bool userStack;
	void * storage;
	size_t size;
	if ( !this->storage ) {
		userStack = false;
		[storage, size] = __stack_alloc( create_size );
	} else {
		userStack = true;
		__cfaabi_dbg_print_safe("Kernel : stack obj %p using user stack %p(%zd bytes)\n", this, this->storage, (intptr_t)this->storage->limit - (intptr_t)this->storage->base);

		// The stack must be aligned, so advance the pointer to the next aligned address
		storage = (void*)libCeiling( (intptr_t)this->storage, libAlign());

		// The size must be shrunk to fit the extra data structures and remain aligned
		ptrdiff_t diff = (intptr_t)storage - (intptr_t)this->storage;
		size = libFloor(create_size - stack_data_size - diff, libAlign());
	} // if
	assertf( size >= MinStackSize, "Stack size %zd provides less than minimum of %zd bytes for a stack.", size, MinStackSize );

	this->storage = (__stack_t *)((intptr_t)storage + size - sizeof(__stack_t));
	this->storage->limit = storage;
	this->storage->base  = (void*)((intptr_t)storage + size - sizeof(__stack_t));
	this->storage->exception_context.top_resume = 0p;
	this->storage->exception_context.current_exception = 0p;
	__attribute__((may_alias)) intptr_t * istorage = (intptr_t*)&this->storage;
	*istorage |= userStack ? 0x1 : 0x0;
}
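
// Resulting layout, assuming the stack grows down from high to low addresses:
//
//   low address                                              high address
//   |<---------------------- usable stack --------------------->|__stack_t|
//   ^ limit                                                 base ^
//
// The low-order bit of this->storage doubles as a flag: 1 means user-supplied storage
// (left untouched by ^?{}(__stack_info_t &)), 0 means runtime-allocated storage.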

// We need to call suspend from invoke.c, so we expose this wrapper that
// is not inline (We can't inline Cforall in C)
extern "C" {
	void __cfactx_cor_leave( struct coroutine$ * src ) {
		coroutine$ * starter = src->cancellation != 0 ? src->last : src->starter;

		src->state = Halted;

		assertf( starter != 0,
			"Attempt to suspend/leave coroutine \"%.256s\" (%p) that has never been resumed.\n"
			"Possible cause is a suspend executed in a member called by a coroutine user rather than by the coroutine main.",
			src->name, src );
		assertf( starter->state != Halted,
			"Attempt by coroutine \"%.256s\" (%p) to suspend/leave back to terminated coroutine \"%.256s\" (%p).\n"
			"Possible cause is terminated coroutine's main routine has already returned.",
			src->name, src, starter->name, starter );

		$ctx_switch( src, starter );
	}
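
	// Note: on normal termination control returns to the coroutine's starter, but when a
	// cancellation is in flight (src->cancellation != 0) it returns to the last resumer,
	// so __cfaehm_cancelled_coroutine can deliver CoroutineCancelled where the resume occurred.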

	struct coroutine$ * __cfactx_cor_finish(void) {
		struct coroutine$ * cor = active_coroutine();

		// get the active thread once
		thread$ * athrd = active_thread();

		/* paranoid */ verify( athrd->corctx_flag );
		athrd->corctx_flag = false;

		if(cor->state == Primed) {
			__cfactx_suspend();
		}

		cor->state = Active;

		return cor;
	}
}


////////////////////////////////////////////////////////////////////////////////////////////////////
// non local ehm routines

// helper for popping from coroutine's ehm buffer
inline nonlocal_exception * pop_ehm_head( coroutine$ * this ) {
    lock( this->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
    nonlocal_exception * nl_ex = pop_head( this->ehm_state.ehm_buffer );
    unlock( this->ehm_state.buffer_lock );
    return nl_ex;
}

void defaultResumeAtHandler( exception_t * except ) {
    __cfaehm_allocate_exception( except );
    free( except );
    __cfaehm_begin_unwind( (void(*)(exception_t *))defaultTerminationHandler );
}

bool poll( coroutine$ * cor ) libcfa_public {
    nonlocal_exception * nl_ex = pop_ehm_head( cor );

    // if there are no pending exceptions, return false
    if ( nl_ex == 0p ) return false;

    // otherwise loop and throwResume all pending exceptions
    while ( nl_ex != 0p ){
        exception_t * ex = nl_ex->the_exception;
        free( nl_ex );
        __cfaehm_throw_resume( ex, defaultResumeAtHandler );

        // only reached if the resumption was handled; otherwise deallocation is done in defaultResumeAtHandler
        free( ex );
        nl_ex = pop_ehm_head( cor );
    }

    return true;
}

bool poll() libcfa_public { return poll( active_coroutine() ); }
coroutine$ * resumer() libcfa_public { return active_coroutine()->last; }

// user facing ehm operations
forall(T & | is_coroutine(T)) {
    // enable/disable non-local exceptions
    void enable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = true; }
    void disable_ehm( T & cor ) libcfa_public { get_coroutine( cor )->ehm_state.ehm_enabled = false; }

    // poll for non-local exceptions
    bool poll( T & cor ) libcfa_public { return poll( get_coroutine( cor ) ); }

    // poll iff nonlocal ehm is enabled
    bool checked_poll( T & cor ) libcfa_public { return get_coroutine( cor )->ehm_state.ehm_enabled ? poll( cor ) : false; }

    coroutine$ * resumer( T & cor ) libcfa_public { return get_coroutine( cor )->last; }
}

// resume non local exception at receiver (i.e. enqueue in ehm buffer)
forall(exceptT *, T & | ehm_resume_at( exceptT, T ))
void resumeAt( T & receiver, exceptT & ex ) libcfa_public {
    coroutine$ * cor = get_coroutine( receiver );
    nonlocal_exception * nl_ex = alloc();
    exceptT * ex_copy = alloc();
    memcpy( ex_copy, &ex, sizeof(exceptT) );
    (*nl_ex){ (exception_t *)ex_copy };
    lock( cor->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
    append( cor->ehm_state.ehm_buffer, nl_ex );
    unlock( cor->ehm_state.buffer_lock );
}

forall(exceptT * | { void $throwResume(exceptT &); })
void resumeAt( coroutine$ * receiver, exceptT & ex ) libcfa_public {
    nonlocal_exception * nl_ex = alloc();
    exceptT * ex_copy = alloc();
    memcpy( ex_copy, &ex, sizeof(exceptT) );
    (*nl_ex){ (exception_t *)ex_copy };
    lock( receiver->ehm_state.buffer_lock __cfaabi_dbg_ctx2 );
    append( receiver->ehm_state.ehm_buffer, nl_ex );
    unlock( receiver->ehm_state.buffer_lock );
}
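
// End-to-end sketch of the non-local EHM flow (comment only; the exception declaration
// syntax and the names "Ping" and "interrupt" are assumptions, not part of this file):
//
//   exception interrupt {};                 // some resumable exception type
//
//   void main( Ping & this ) {
//       enable_ehm( this );                 // opt in to non-local delivery
//       for () {
//           checked_poll( this );           // throwResume any exceptions queued by resumeAt
//           ... suspend; ...
//       }
//   }
//
//   // from another coroutine or thread:
//   interrupt ex;
//   resumeAt( ping, ex );                   // copies ex and appends it to ping's ehm buffer;
//                                           // it is raised the next time ping polls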

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //