//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Jul 21 22:28:56 2017
// Update Count     : 1
//

// NOTE(review): the two include directives were truncated to bare "#include"
// (header names lost). Restored to the headers this file demonstrably needs:
// <stdbool.h> for bool (coStack_t.userStack) and <stddef.h> for ptrdiff_t
// (operator ?[?] below) — confirm against the original file.
#include <stdbool.h>
#include <stddef.h>

#ifdef __CFORALL__
extern "C" {
#endif

// Public section: type definitions shared between the runtime and generated code.
#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

	// Branch-prediction hint for rarely-taken paths (GCC/Clang builtin).
	#define unlikely(x)    __builtin_expect(!!(x), 0)
	#define thread_local _Thread_local

	// Generic function pointer used to identify acquiring routines.
	typedef void (*fptr_t)();

	// Busy-wait lock protecting runtime-internal data.
	struct spinlock {
		volatile int lock;                       // 0 = free, non-zero = held
		#ifdef __CFA_DEBUG__
			const char * prev_name;              // name of the routine that last acquired the lock (debug builds only)
			void* prev_thrd;                     // thread that last acquired the lock (debug builds only)
		#endif
	};

	// Intrusive FIFO of threads; tail points at the last link field for O(1) append.
	struct __thread_queue_t {
		struct thread_desc * head;
		struct thread_desc ** tail;
	};

	// LIFO stack of condition criteria.
	struct __condition_stack_t {
		struct __condition_criterion_t * top;
	};

	#ifdef __CFORALL__
	extern "Cforall" {
		void ?{}( struct __thread_queue_t & );
		void append( struct __thread_queue_t *, struct thread_desc * );
		struct thread_desc * pop_head( struct __thread_queue_t * );
		struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );

		void ?{}( struct __condition_stack_t & );
		void push( struct __condition_stack_t *, struct __condition_criterion_t * );
		struct __condition_criterion_t * pop( struct __condition_stack_t * );

		void ?{}(spinlock & this);
		void ^?{}(spinlock & this);
	}
	#endif

	// Per-coroutine stack bookkeeping.
	struct coStack_t {
		unsigned int size;                       // size of stack
		void *storage;                           // pointer to stack
		void *limit;                             // stack grows towards stack limit
		void *base;                              // base of stack
		void *context;                           // address of cfa_context_t
		void *top;                               // address of top of storage
		bool userStack;                          // whether or not the user allocated the stack
	};

	enum coroutine_state { Halted, Start, Inactive, Active, Primed };

	struct coroutine_desc {
		struct coStack_t stack;                  // stack information of the coroutine
		const char *name;                        // textual name for coroutine/task, initialized by uC++ generated code
		int errno_;                              // copy of global UNIX variable errno
		enum coroutine_state state;              // current execution status for coroutine
		struct coroutine_desc * starter;         // first coroutine to resume this one
		struct coroutine_desc * last;            // last coroutine to resume this one
	};

	// Set of routines a waitfor statement is willing to accept.
	struct __waitfor_mask_t {
		short * accepted;                        // the index of the accepted function, -1 if none
		struct __acceptable_t * clauses;         // list of acceptable functions, null if any
		short size;                              // number of acceptable functions
	};

	struct monitor_desc {
		struct spinlock lock;                    // spinlock to protect internal data
		struct thread_desc * owner;              // current owner of the monitor
		struct __thread_queue_t entry_queue;     // queue of threads that are blocked waiting for the monitor
		struct __condition_stack_t signal_stack; // stack of conditions to run next once we exit the monitor
		unsigned int recursion;                  // monitor routines can be called recursively, we need to keep track of that
		struct __waitfor_mask_t mask;            // mask used to know if some thread is waiting for something while holding the monitor
		struct __condition_node_t * dtor_node;   // node used to signal the dtor in a waitfor dtor
	};

	// Group of monitors acquired together by one routine.
	struct __monitor_group_t {
		struct monitor_desc ** list;             // currently held monitors
		short size;                              // number of currently held monitors
		fptr_t func;                             // last function that acquired monitors
	};

	struct thread_desc {
		// Core threading fields
		struct coroutine_desc self_cor;          // coroutine body used to store context
		struct monitor_desc self_mon;            // monitor body used for mutual exclusion
		struct monitor_desc * self_mon_p;        // pointer to monitor with sufficient lifetime for current monitors
		struct __monitor_group_t monitors;       // monitors currently held by this thread

		// Link lists fields
		struct thread_desc * next;               // instrusive link field for threads
	};

	#ifdef __CFORALL__
	extern "Cforall" {
		// Index into a monitor group.
		static inline monitor_desc * ?[?]( const __monitor_group_t & this, ptrdiff_t index ) {
			return this.list[index];
		}

		// Two groups are equal iff both have (non-)null lists, the same size,
		// the same acquiring function, and element-wise identical monitors.
		static inline bool ?==?( const __monitor_group_t & lhs, const __monitor_group_t & rhs ) {
			if( (lhs.list != 0) != (rhs.list != 0) ) return false;
			if( lhs.size != rhs.size ) return false;
			if( lhs.func != rhs.func ) return false;

			// Check that all the monitors match
			for( int i = 0; i < lhs.size; i++ ) {
				// If not a match, check next function
				if( lhs[i] != rhs[i] ) return false;
			}

			return true;
		}
	}
	#endif

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)

// Private section: context-switch machinery, visible only to the runtime itself.
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

	// Saved machine registers for a suspended context.
	struct machine_context_t {
		void *SP;
		void *FP;
		void *PC;
	};

	// assembler routines that performs the context switch
	extern void CtxInvokeStub( void );
	void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");

	// Capture the current stack and frame pointers into ctx (per-arch inline asm).
	#if defined( __x86_64__ )
	#define CtxGet( ctx ) __asm__ ( \
			"movq %%rsp,%0\n" \
			"movq %%rbp,%1\n" \
			: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#elif defined( __i386__ )
	#define CtxGet( ctx ) __asm__ ( \
			"movl %%esp,%0\n" \
			"movl %%ebp,%1\n" \
			: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#endif

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)

#ifdef __CFORALL__
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //