//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Tue Jan 23 14:55:46 2018
// Update Count     : 3
//

#include "bits/containers.h"
#include "bits/defs.h"
#include "bits/locks.h"

#ifdef __cforall
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

#ifdef __cforall
	extern "Cforall" {
		static inline struct thread_desc * & get_next( struct thread_desc & this );
		static inline struct __condition_criterion_t * & get_next( struct __condition_criterion_t & this );
	}
#endif

	struct coStack_t {
		// size of stack
		size_t size;

		// pointer to stack
		void * storage;

		// stack grows towards stack limit
		void * limit;

		// base of stack
		void * base;

		// address of cfa_context_t
		void * context;

		// address of top of storage
		void * top;

		// whether or not the user allocated the stack
		bool userStack;
	};

	enum coroutine_state { Halted, Start, Inactive, Active, Primed };

	struct coroutine_desc {
		// stack information of the coroutine
		struct coStack_t stack;

		// textual name for coroutine/task, initialized by CFA generated code
		const char * name;

		// copy of global UNIX variable errno
		int errno_;

		// current execution status for coroutine
		enum coroutine_state state;

		// first coroutine to resume this one
		struct coroutine_desc * starter;

		// last coroutine to resume this one
		struct coroutine_desc * last;
	};

	struct __waitfor_mask_t {
		// the index of the accepted function, -1 if none
		short * accepted;

		// list of acceptable functions, null if any
		__small_array_t(struct __acceptable_t) __cfa_anonymous_object;
	};

	struct monitor_desc {
		// spinlock to protect internal data
		struct __spinlock_t lock;

		// current owner of the monitor
		struct thread_desc * owner;

		// queue of threads that are blocked waiting for the monitor
		__queue_t(struct thread_desc) entry_queue;

		// stack of conditions to run next once we exit the monitor
		__stack_t(struct __condition_criterion_t) signal_stack;

		// monitor routines can be called recursively, we need to keep track of that
		unsigned int recursion;

		// mask used to know if some thread is waiting for something while holding the monitor
		struct __waitfor_mask_t mask;

		// node used to signal the dtor in a waitfor dtor
		struct __condition_node_t * dtor_node;
	};

	struct __monitor_group_t {
		// currently held monitors
		__small_array_t(monitor_desc *) __cfa_anonymous_object;

		// last function that acquired monitors
		fptr_t func;
	};

	struct thread_desc {
		// Core threading fields
		// coroutine body used to store context
		struct coroutine_desc self_cor;

		// current active context
		struct coroutine_desc * curr_cor;

		// monitor body used for mutual exclusion
		struct monitor_desc self_mon;

		// pointer to monitor with sufficient lifetime for current monitors
		struct monitor_desc * self_mon_p;

		// monitors currently held by this thread
		struct __monitor_group_t monitors;

		// Linked-list fields
		// intrusive link field for threads
		struct thread_desc * next;

		__cfaabi_dbg_debug_do(
			// intrusive link fields for debugging
			struct thread_desc * dbg_next;
			struct thread_desc * dbg_prev;
		)
	};
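	// Illustrative sketch (an assumption, not part of this header's interface):
	// the intrusive `next` field above is what get_next (defined below) exposes,
	// letting the runtime chain thread_desc nodes into a queue without any
	// per-node allocation. For example, given hypothetical threads t1 and t2:
	//
	//   get_next( t1 ) = &t2;                        // splice t2 after t1
	//   struct thread_desc * succ = get_next( t1 ); // succ == &t2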
#ifdef __cforall
	extern "Cforall" {
		static inline thread_desc * & get_next( thread_desc & this ) {
			return this.next;
		}

		static inline struct __condition_criterion_t * & get_next( struct __condition_criterion_t & this );

		static inline void ?{}(__monitor_group_t & this) {
			(this.data){NULL};
			(this.size){0};
			(this.func){NULL};
		}

		static inline void ?{}(__monitor_group_t & this, struct monitor_desc ** data, __lock_size_t size, fptr_t func) {
			(this.data){data};
			(this.size){size};
			(this.func){func};
		}

		static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
			if( (lhs.data != 0) != (rhs.data != 0) ) return false;
			if( lhs.size != rhs.size ) return false;
			if( lhs.func != rhs.func ) return false;

			// check that all the monitors match
			for( int i = 0; i < lhs.size; i++ ) {
				// if any monitor differs, the groups are not equal
				if( lhs[i] != rhs[i] ) return false;
			}

			return true;
		}

		static inline void ?=?(__monitor_group_t & lhs, const __monitor_group_t & rhs) {
			lhs.data = rhs.data;
			lhs.size = rhs.size;
			lhs.func = rhs.func;
		}
	}
#endif

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)

#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

	// machine registers saved and restored across a context switch
	struct machine_context_t {
		void * SP;
		void * FP;
		void * PC;
	};

	// assembler routines that perform the context switch
	extern void CtxInvokeStub( void );
	extern void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");

	#if defined( __x86_64__ )
	#define CtxGet( ctx ) __asm__ ( \
			"movq %%rsp,%0\n" \
			"movq %%rbp,%1\n" \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#elif defined( __i386__ )
	#define CtxGet( ctx ) __asm__ ( \
			"movl %%esp,%0\n" \
			"movl %%ebp,%1\n" \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#elif defined( __ARM_ARCH )
	#define CtxGet( ctx ) __asm__ ( \
			"mov %0,%%sp\n" \
			"mov %1,%%r11\n" \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#endif

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)

#ifdef __cforall
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //
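// Illustrative sketch (an assumption, not part of the runtime interface): in a
// translation unit compiled with __CFA_INVOKE_PRIVATE__ defined, CtxGet captures
// the caller's stack and frame pointers into a machine_context_t:
//
//   struct machine_context_t ctx;
//   CtxGet( ctx );   // ctx.SP and ctx.FP now describe the current frame
//   // note: CtxGet fills only SP and FP; ctx.PC is not captured here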