source: src/libcfa/concurrency/invoke.h@0895cba

Last change on this file since 0895cba was b18830e, checked in by Thierry Delisle <tdelisle@…>, 8 years ago

Refactoring monitor code in preparation for proper waitfor support

  • added monitor group struct
  • else and timeout now return negative results
  • Property mode set to 100644
File size: 6.4 KB
//
// Cforall Version 1.0.0 Copyright (C) 2016 University of Waterloo
//
// The contents of this file are covered under the licence agreement in the
// file "LICENCE" distributed with Cforall.
//
// invoke.h --
//
// Author           : Thierry Delisle
// Created On       : Tue Jan 17 12:27:26 2016
// Last Modified By : Peter A. Buhr
// Last Modified On : Fri Jul 21 22:28:56 2017
// Update Count     : 1
//

#include <stdbool.h>
#include <stdint.h>

#ifdef __CFORALL__
extern "C" {
#endif

#if ! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_H_
#define _INVOKE_H_

    #define unlikely(x) __builtin_expect(!!(x), 0)
    #define thread_local _Thread_local

    typedef void (*fptr_t)();

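    // Simple spin lock; the debug-only fields record the previous lock-site name
    // and the thread that last held the lock, for diagnostics.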
    struct spinlock {
        volatile int lock;
        #ifdef __CFA_DEBUG__
            const char * prev_name;
            void* prev_thrd;
        #endif
    };

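    // Intrusive queue of blocked threads and stack of condition criteria; these
    // back the entry_queue and signal_stack fields of monitor_desc below.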
    struct __thread_queue_t {
        struct thread_desc * head;
        struct thread_desc ** tail;
    };

    struct __condition_stack_t {
        struct __condition_criterion_t * top;
    };

    #ifdef __CFORALL__
    extern "Cforall" {
        void ?{}( struct __thread_queue_t & );
        void append( struct __thread_queue_t *, struct thread_desc * );
        struct thread_desc * pop_head( struct __thread_queue_t * );
        struct thread_desc * remove( struct __thread_queue_t *, struct thread_desc ** );

        void ?{}( struct __condition_stack_t & );
        void push( struct __condition_stack_t *, struct __condition_criterion_t * );
        struct __condition_criterion_t * pop( struct __condition_stack_t * );

        void ?{}(spinlock & this);
        void ^?{}(spinlock & this);
    }
    #endif
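
    // Illustrative usage sketch only, assuming a valid `struct thread_desc * td`:
    //
    //   struct __thread_queue_t q;                    // initialized by its ?{} constructor
    //   append( &q, td );                             // enqueue td at the tail
    //   struct thread_desc * next = pop_head( &q );   // dequeue from the head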

    struct coStack_t {
        unsigned int size;                        // size of stack
        void *storage;                            // pointer to stack
        void *limit;                              // stack grows towards stack limit
        void *base;                               // base of stack
        void *context;                            // address of cfa_context_t
        void *top;                                // address of top of storage
        bool userStack;                           // whether or not the user allocated the stack
    };

    enum coroutine_state { Halted, Start, Inactive, Active, Primed };

    struct coroutine_desc {
        struct coStack_t stack;                   // stack information of the coroutine
        const char *name;                         // textual name for coroutine/task, initialized by uC++ generated code
        int errno_;                               // copy of global UNIX variable errno
        enum coroutine_state state;               // current execution status for coroutine
        struct coroutine_desc * starter;          // first coroutine to resume this one
        struct coroutine_desc * last;             // last coroutine to resume this one
    };

    struct monitor_desc {
        struct spinlock lock;                     // spinlock to protect internal data
        struct thread_desc * owner;               // current owner of the monitor
        struct __thread_queue_t entry_queue;      // queue of threads that are blocked waiting for the monitor
        struct __condition_stack_t signal_stack;  // stack of conditions to run next once we exit the monitor
        unsigned int recursion;                   // monitor routines can be called recursively, we need to keep track of that

        struct __acceptable_t * acceptables;      // list of acceptable functions, null if any
        unsigned short acceptable_count;          // number of acceptable functions
        short accepted_index;                     // the index of the accepted function, -1 if none
    };

    struct __monitor_group {
        struct monitor_desc ** list;              // currently held monitors
        short size;                               // number of currently held monitors
        fptr_t func;                              // last function that acquired monitors
    };

    struct thread_desc {
        // Core threading fields
        struct coroutine_desc self_cor;           // coroutine body used to store context
        struct monitor_desc self_mon;             // monitor body used for mutual exclusion
        struct monitor_desc * self_mon_p;         // pointer to monitor with sufficient lifetime for current monitors
        struct __monitor_group monitors;          // monitors currently held by this thread

        // Linked-list fields
        struct thread_desc * next;                // intrusive link field for threads
    };

    #ifdef __CFORALL__
    extern "Cforall" {
        static inline monitor_desc * ?[?]( const __monitor_group & this, ptrdiff_t index ) {
            return this.list[index];
        }

        static inline bool ?==?( const __monitor_group & lhs, const __monitor_group & rhs ) {
            if( lhs.size != rhs.size ) return false;
            if( lhs.func != rhs.func ) return false;

            // Check that all the monitors match
            for( int i = 0; i < lhs.size; i++ ) {
                // If not a match, check next function
                if( lhs[i] != rhs[i] ) return false;
            }

            return true;
        }
    }
    #endif
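
    // Illustrative usage sketch only (hypothetical locals): a waiting entry can be
    // matched against an incoming call by comparing monitor groups.
    //
    //   struct __monitor_group g = { mon_list, mon_count, call };  // list, size, func
    //   if( g == waiting_thread->monitors ) { /* same function, same monitor set */ }
    //   struct monitor_desc * first = g[0];                        // indexed access via ?[?]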

#endif //_INVOKE_H_
#else //! defined(__CFA_INVOKE_PRIVATE__)
#ifndef _INVOKE_PRIVATE_H_
#define _INVOKE_PRIVATE_H_

    struct machine_context_t {
        void *SP;
        void *FP;
        void *PC;
    };

    // assembly routines that perform the context switch
    extern void CtxInvokeStub( void );
    void CtxSwitch( void * from, void * to ) asm ("CtxSwitch");

    #if defined( __x86_64__ )
    #define CtxGet( ctx ) __asm__ ( \
            "movq %%rsp,%0\n" \
            "movq %%rbp,%1\n" \
            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    #elif defined( __i386__ )
    #define CtxGet( ctx ) __asm__ ( \
            "movl %%esp,%0\n" \
            "movl %%ebp,%1\n" \
            : "=rm" (ctx.SP), "=rm" (ctx.FP) )
    #endif
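
    // Illustrative usage sketch: CtxGet captures the caller's current stack and
    // frame pointers (ctx.PC is not filled in by this macro).
    //
    //   struct machine_context_t ctx;
    //   CtxGet( ctx );   // ctx.SP and ctx.FP now hold the current %rsp/%rbp (or %esp/%ebp)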

#endif //_INVOKE_PRIVATE_H_
#endif //! defined(__CFA_INVOKE_PRIVATE__)
#ifdef __CFORALL__
}
#endif

// Local Variables: //
// mode: c //
// tab-width: 4 //
// End: //