// -*- Mode: C -*-
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Thierry Delisle
// Last Modified On : --
// Update Count     : 0
//

#include <stdbool.h>
#include <stdint.h>

#ifdef __CFORALL__
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

	#define unlikely(x)    __builtin_expect(!!(x), 0)
	#define thread_local _Thread_local

	struct spinlock {
		volatile int lock;
		#ifdef __CFA_DEBUG__
			const char * prev_name;
			void * prev_thrd;
		#endif
	};

	struct __thread_queue_t {
		struct thread_desc * head;
		struct thread_desc ** tail;
	};

	struct __condition_stack_t {
		struct __condition_criterion_t * top;
	};

	#ifdef __CFORALL__
	extern "Cforall" {
		void ?{}( struct __thread_queue_t * );
		void append( struct __thread_queue_t *, struct thread_desc * );
		struct thread_desc * pop_head( struct __thread_queue_t * );

		void ?{}( struct __condition_stack_t * );
		void push( struct __condition_stack_t *, struct __condition_criterion_t * );
		struct __condition_criterion_t * pop( struct __condition_stack_t * );

		void ?{}( spinlock * this );
		void ^?{}( spinlock * this );
	}
	#endif

	struct coStack_t {
		unsigned int size;			// size of stack
		void * storage;				// pointer to stack
		void * limit;				// stack grows towards stack limit
		void * base;				// base of stack
		void * context;				// address of cfa_context_t
		void * top;				// address of top of storage
		bool userStack;				// whether or not the user allocated the stack
	};

	enum coroutine_state { Halted, Start, Inactive, Active, Primed };

	struct coroutine_desc {
		struct coStack_t stack;			// stack information of the coroutine
		const char * name;			// textual name for coroutine/task, initialized by uC++ generated code
		int errno_;				// copy of global UNIX variable errno
		enum coroutine_state state;		// current execution status for coroutine
		struct coroutine_desc * starter;	// first coroutine to resume this one
		struct coroutine_desc * last;		// last coroutine to resume this one
	};

	struct monitor_desc {
		struct spinlock lock;			// spinlock to protect internal data
		struct thread_desc * owner;		// current owner of the monitor
		struct __thread_queue_t entry_queue;	// queue of threads that are blocked waiting for the monitor
		struct __condition_stack_t signal_stack; // stack of conditions to run next once we exit the monitor
		unsigned int recursion;			// monitor routines can be called recursively, we need to keep track of that
	};

	struct thread_desc {
		struct coroutine_desc cor;		// coroutine body used to store context
		struct monitor_desc mon;		// monitor body used for mutual exclusion
		struct thread_desc * next;		// intrusive link field for threads
		struct monitor_desc ** current_monitors;  // currently held monitors
		unsigned short current_monitor_count;	   // number of currently held monitors
	};

#endif //_INVOKE_H_
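
	/* A minimal usage sketch (an illustration under assumptions, not the runtime's
	   actual implementation, which lives in the Cforall library sources): the
	   __thread_queue_t above is an intrusive FIFO; `tail` stores the address of the
	   last `next` link so append is O(1) with no traversal. In plain C the declared
	   operations could behave like:

	       void append( struct __thread_queue_t * q, struct thread_desc * t ) {
	           t->next = NULL;
	           *q->tail = t;            // link the new thread after the current last one
	           q->tail = &t->next;      // tail now addresses the new link field
	       }

	       struct thread_desc * pop_head( struct __thread_queue_t * q ) {
	           struct thread_desc * t = q->head;
	           if ( t ) {
	               q->head = t->next;
	               if ( ! t->next ) q->tail = &q->head;   // queue is now empty
	               t->next = NULL;
	           }
	           return t;
	       }
	*/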
#else //! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

	struct machine_context_t {
		void * SP;
		void * FP;
		void * PC;
	};

	// assembler routines that perform the context switch
	extern void CtxInvokeStub( void );
	void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");

	#if defined( __x86_64__ )
	#define CtxGet( ctx ) __asm__ ( \
			"movq %%rsp,%0\n"   \
			"movq %%rbp,%1\n"   \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#elif defined( __i386__ )
	#define CtxGet( ctx ) __asm__ ( \
			"movl %%esp,%0\n"   \
			"movl %%ebp,%1\n"   \
		: "=rm" (ctx.SP), "=rm" (ctx.FP) )
	#endif

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)

#ifdef __CFORALL__
}
#endif
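
/* Illustrative sketch (an assumption for clarity, not code from this header): CtxGet
   captures only the caller's current stack and frame pointers into a machine_context_t;
   the PC field is left for the switch machinery (CtxSwitch / CtxInvokeStub) to manage.
   Private-side use could look like:

       struct machine_context_t ctx;
       CtxGet( ctx );   // ctx.SP and ctx.FP now hold the current %rsp/%rbp (or %esp/%ebp)
*/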