Changeset e84ab3d
- Timestamp: Jul 5, 2021, 4:44:20 PM
- Branches: ADT, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: 7f62b708
- Parents: ee23a8d
- Files: 30 edited
libcfa/prelude/builtins.c
```diff
@@ 57 @@
  
  // generator support
- struct $generator{
+ struct generator$ {
      inline int;
  };
  
- static inline void ?{}( $generator& this) { ((int&)this) = 0; }
- static inline void ^?{}( $generator&) {}
+ static inline void ?{}(generator$ & this) { ((int&)this) = 0; }
+ static inline void ^?{}(generator$ &) {}
  
  trait is_generator(T &) {
      void main(T & this);
-     $generator* get_generator(T & this);
+     generator$ * get_generator(T & this);
  };
  
```
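For orientation, the `is_generator` trait above is the interface the `generator` keyword satisfies, now expressed in terms of the renamed `generator$` type. A minimal sketch of a user-level generator (the `Fib` type and driver loop are illustrative, not part of this changeset):

```cfa
#include <fstream.hfa>                      // sout

generator Fib {                             // generator keyword: embeds a generator$
    int fn1, fn;                            // state retained between suspends
};

void main( Fib & fib ) with( fib ) {        // driven by resume( fib )
    [fn1, fn] = [1, 0];
    for () {
        suspend;                            // return control to the resumer
        [fn1, fn] = [fn, fn + fn1];
    }
}

int main() {
    Fib f;
    for ( 10 ) {                            // first 10 Fibonacci numbers
        resume( f );
        sout | f.fn;
    }
}
```

Each `resume` runs `main` to the next `suspend`, with `fn1`/`fn` preserved in the generator object between activations.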
libcfa/src/bits/weakso_locks.cfa
```diff
@@ 24 @@
  bool try_lock( blocking_lock & ) { return false; }
  void unlock( blocking_lock & ) {}
- void on_notify( blocking_lock &, struct $thread* ) {}
+ void on_notify( blocking_lock &, struct thread$ * ) {}
  size_t on_wait( blocking_lock & ) { return 0; }
  void on_wakeup( blocking_lock &, size_t ) {}
```
libcfa/src/bits/weakso_locks.hfa
```diff
@@ 23 @@
  #include "containers/list.hfa"
  
- struct $thread;
+ struct thread$;
  
  //-----------------------------------------------------------------------------
@@ 32 @@
      // List of blocked threads
-     dlist( $thread ) blocked_threads;
+     dlist( thread$ ) blocked_threads;
  
      // Count of current blocked threads
@@ 44 @@
      // Current thread owning the lock
-     struct $thread * owner;
+     struct thread$ * owner;
  
      // Number of recursion level
@@ 56 @@
  bool try_lock( blocking_lock & this ) OPTIONAL_THREAD;
  void unlock( blocking_lock & this ) OPTIONAL_THREAD;
- void on_notify( blocking_lock & this, struct $thread* t ) OPTIONAL_THREAD;
+ void on_notify( blocking_lock & this, struct thread$ * t ) OPTIONAL_THREAD;
  size_t on_wait( blocking_lock & this ) OPTIONAL_THREAD;
  void on_wakeup( blocking_lock & this, size_t ) OPTIONAL_THREAD;
@@ 74 @@
  static inline size_t on_wait ( multiple_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
  static inline void on_wakeup( multiple_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
- static inline void on_notify( multiple_acquisition_lock & this, struct $thread* t ){ on_notify( (blocking_lock &)this, t ); }
+ static inline void on_notify( multiple_acquisition_lock & this, struct thread$ * t ){ on_notify( (blocking_lock &)this, t ); }
```
libcfa/src/concurrency/alarm.cfa
```diff
@@ 51 @@
  //=============================================================================================
  
- void ?{}( alarm_node_t & this, $thread* thrd, Duration alarm, Duration period) with( this ) {
+ void ?{}( alarm_node_t & this, thread$ * thrd, Duration alarm, Duration period) with( this ) {
      this.initial = alarm;
      this.period = period;
```
libcfa/src/concurrency/alarm.hfa
```diff
@@ 25 @@
  #include "containers/list.hfa"
  
- struct $thread;
+ struct thread$;
  struct processor;
@@ 52 @@
      union {
-         $thread * thrd;             // thrd who created event
+         thread$ * thrd;             // thrd who created event
          processor * proc;           // proc who created event
          Alarm_Callback callback;    // callback to handle event
@@ 63 @@
  P9_EMBEDDED( alarm_node_t, dlink(alarm_node_t) )
  
- void ?{}( alarm_node_t & this, $thread* thrd, Duration alarm, Duration period );
+ void ?{}( alarm_node_t & this, thread$ * thrd, Duration alarm, Duration period );
  void ?{}( alarm_node_t & this, processor * proc, Duration alarm, Duration period );
  void ?{}( alarm_node_t & this, Alarm_Callback callback, Duration alarm, Duration period );
```
libcfa/src/concurrency/clib/cfathread.cfa
```diff
@@ 23 @@
  #include "cfathread.h"
  
- extern void ?{}(processor &, const char[], cluster &, $thread*);
+ extern void ?{}(processor &, const char[], cluster &, thread$ *);
  extern "C" {
      extern void __cfactx_invoke_thread(void (*main)(void *), void * this);
@@ 34 @@
  
  struct cfathread_object {
-     $thread self;
+     thread$ self;
      void * (*themain)( void * );
      void * arg;
@@ 42 @@
  void ^?{}(cfathread_object & mutex this);
  
- static inline $thread * get_thread( cfathread_object & this ) { return &this.self; }
+ static inline thread$ * get_thread( cfathread_object & this ) { return &this.self; }
  
  typedef ThreadCancelled(cfathread_object) cfathread_exception;
@@ 81 @@
  // Special Init Thread responsible for the initialization or processors
  struct __cfainit {
-     $thread self;
+     thread$ self;
      void (*init)( void * );
      void * arg;
@@ 88 @@
  void ^?{}(__cfainit & mutex this);
  
- static inline $thread * get_thread( __cfainit & this ) { return &this.self; }
+ static inline thread$ * get_thread( __cfainit & this ) { return &this.self; }
  
  typedef ThreadCancelled(__cfainit) __cfainit_exception;
@@ 109 @@
  
      // Don't use __thrd_start! just prep the context manually
-     $thread * this_thrd = get_thread(this);
+     thread$ * this_thrd = get_thread(this);
      void (*main_p)(__cfainit &) = main;
```
libcfa/src/concurrency/coroutine.cfa
```diff
@@ 37 @@
  
  extern "C" {
-     void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
+     void _CtxCoroutine_Unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__));
      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) __attribute__ ((__noreturn__));
      static void _CtxCoroutine_UnwindCleanup(_Unwind_Reason_Code, struct _Unwind_Exception *) {
@@ 62 @@
  forall(T & | is_coroutine(T))
  void __cfaehm_cancelled_coroutine(
-         T & cor, $coroutine * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
+         T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
      verify( desc->cancellation );
      desc->state = Cancelled;
@@ 114 @@
  }
  
- void ?{}( $coroutine& this, const char name[], void * storage, size_t storageSize ) with( this ) {
+ void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
      (this.context){0p, 0p};
      (this.stack){storage, storageSize};
@@ 124 @@
  }
  
- void ^?{}($coroutine& this) {
+ void ^?{}(coroutine$& this) {
      if(this.state != Halted && this.state != Start && this.state != Primed) {
-         $coroutine * src = active_coroutine();
-         $coroutine * dst = &this;
+         coroutine$ * src = active_coroutine();
+         coroutine$ * dst = &this;
  
          struct _Unwind_Exception storage;
@@ 148 @@
  forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
  void prime(T& cor) {
-     $coroutine* this = get_coroutine(cor);
+     coroutine$* this = get_coroutine(cor);
      assert(this->state == Start);
@@ 248 @@
  // is not inline (We can't inline Cforall in C)
  extern "C" {
-     void __cfactx_cor_leave( struct $coroutine * src ) {
-         $coroutine * starter = src->cancellation != 0 ? src->last : src->starter;
+     void __cfactx_cor_leave( struct coroutine$ * src ) {
+         coroutine$ * starter = src->cancellation != 0 ? src->last : src->starter;
  
          src->state = Halted;
@@ 265 @@
      }
  
-     struct $coroutine * __cfactx_cor_finish(void) {
-         struct $coroutine * cor = active_coroutine();
+     struct coroutine$ * __cfactx_cor_finish(void) {
+         struct coroutine$ * cor = active_coroutine();
  
          // get the active thread once
-         $thread * athrd = active_thread();
+         thread$ * athrd = active_thread();
  
          /* paranoid */ verify( athrd->corctx_flag );
```
libcfa/src/concurrency/coroutine.hfa
```diff
@@ 39 @@
  trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled, (T))) {
      void main(T & this);
-     $coroutine * get_coroutine(T & this);
+     coroutine$ * get_coroutine(T & this);
  };
  
- #define DECL_COROUTINE(X) static inline $coroutine* get_coroutine(X& this) { return &this.__cor; } void main(X& this)
+ #define DECL_COROUTINE(X) static inline coroutine$* get_coroutine(X& this) { return &this.__cor; } void main(X& this)
  
  //-----------------------------------------------------------------------------
@@ 49 @@
  // void ^?{}( coStack_t & this );
  
- void ?{}( $coroutine & this, const char name[], void * storage, size_t storageSize );
- void ^?{}( $coroutine & this );
- 
- static inline void ?{}( $coroutine & this) { this{ "Anonymous Coroutine", 0p, 0 }; }
- static inline void ?{}( $coroutine & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; }
- static inline void ?{}( $coroutine & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }
- static inline void ?{}( $coroutine & this, const char name[]) { this{ name, 0p, 0 }; }
- static inline void ?{}( $coroutine & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }
+ void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize );
+ void ^?{}( coroutine$ & this );
+ 
+ static inline void ?{}( coroutine$ & this) { this{ "Anonymous Coroutine", 0p, 0 }; }
+ static inline void ?{}( coroutine$ & this, size_t stackSize) { this{ "Anonymous Coroutine", 0p, stackSize }; }
+ static inline void ?{}( coroutine$ & this, void * storage, size_t storageSize ) { this{ "Anonymous Coroutine", storage, storageSize }; }
+ static inline void ?{}( coroutine$ & this, const char name[]) { this{ name, 0p, 0 }; }
+ static inline void ?{}( coroutine$ & this, const char name[], size_t stackSize ) { this{ name, 0p, stackSize }; }
  
  //-----------------------------------------------------------------------------
@@ 63 @@
  void prime(T & cor);
  
- static inline struct $coroutine * active_coroutine() { return active_thread()->curr_cor; }
+ static inline struct coroutine$ * active_coroutine() { return active_thread()->curr_cor; }
  
  //-----------------------------------------------------------------------------
@@ 73 @@
  
  forall(T &)
- void __cfactx_start(void (*main)(T &), struct $coroutine * cor, T & this, void (*invoke)(void (*main)(void *), void *));
- 
- extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine *) __attribute__ ((__noreturn__));
+ void __cfactx_start(void (*main)(T &), struct coroutine$ * cor, T & this, void (*invoke)(void (*main)(void *), void *));
+ 
+ extern void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ *) __attribute__ ((__noreturn__));
  
  extern void __cfactx_switch( struct __stack_context_t * from, struct __stack_context_t * to ) asm ("__cfactx_switch");
@@ 82 @@
  // Private wrappers for context switch and stack creation
  // Wrapper for co
- static inline void $ctx_switch( $coroutine * src, $coroutine * dst ) __attribute__((nonnull (1, 2))) {
+ static inline void $ctx_switch( coroutine$ * src, coroutine$ * dst ) __attribute__((nonnull (1, 2))) {
      // set state of current coroutine to inactive
      src->state = src->state == Halted ? Halted : Blocked;
  
      // get the active thread once
-     $thread * athrd = active_thread();
+     thread$ * athrd = active_thread();
  
      // Mark the coroutine
@@ 124 @@
      // will also migrate which means this value will
      // stay in syn with the TLS
-     $coroutine * src = active_coroutine();
+     coroutine$ * src = active_coroutine();
  
      assertf( src->last != 0,
@@ 141 @@
  forall(T & | is_coroutine(T))
  void __cfaehm_cancelled_coroutine(
-     T & cor, $coroutine * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );
+     T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) );
  
  // Resume implementation inlined for performance
@@ 151 @@
      // will also migrate which means this value will
      // stay in syn with the TLS
-     $coroutine * src = active_coroutine();
-     $coroutine * dst = get_coroutine(cor);
+     coroutine$ * src = active_coroutine();
+     coroutine$ * dst = get_coroutine(cor);
  
      if( unlikely(dst->context.SP == 0p) ) {
@@ 180 @@
  }
  
- static inline void resume( $coroutine * dst ) __attribute__((nonnull (1))) {
+ static inline void resume( coroutine$ * dst ) __attribute__((nonnull (1))) {
      // optimization : read TLS once and reuse it
      // Safety note: this is preemption safe since if
@@ 186 @@
      // will also migrate which means this value will
      // stay in syn with the TLS
-     $coroutine * src = active_coroutine();
+     coroutine$ * src = active_coroutine();
  
      // not resuming self ?
```
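The renamed `coroutine$` descriptor is what `get_coroutine` returns for any type built with the `coroutine` keyword, and the inlined `resume` above is its public driver. A small illustrative sketch of the stackful counterpart to the generator example (the `Counter` type is not part of this changeset):

```cfa
#include <coroutine.hfa>
#include <fstream.hfa>

coroutine Counter {                  // coroutine keyword: embeds a coroutine$
    int value;
};

void main( Counter & c ) with( c ) { // runs on the coroutine's own stack
    value = 0;
    for () {
        value += 1;
        suspend;                     // context switch back to the last resumer
    }
}

int main() {
    Counter c;                       // per the code above, the stack is created
    for ( 3 ) resume( c );           // lazily on the first resume (SP == 0p test)
    sout | c.value;                  // prints 3
}
```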
libcfa/src/concurrency/exception.cfa
```diff
@@ 20 @@
  #include "coroutine.hfa"
  
- extern struct $thread * mainThread;
+ extern struct thread$ * mainThread;
  extern "C" {
      extern void __cfactx_thrd_leave();
@@ 55 @@
  
  STOP_AT_END_FUNCTION(coroutine_cancelstop,
-     struct $coroutine * src = ($coroutine *)stop_param;
-     struct $coroutine * dst = src->last;
+     struct coroutine$ * src = (coroutine$ *)stop_param;
+     struct coroutine$ * dst = src->last;
  
      $ctx_switch( src, dst );
@@ 72 @@
      void * stop_param;
  
-     struct $thread * this_thread = active_thread();
+     struct thread$ * this_thread = active_thread();
      if ( &this_thread->self_cor != this_thread->curr_cor ) {
-         struct $coroutine * cor = this_thread->curr_cor;
+         struct coroutine$ * cor = this_thread->curr_cor;
          cor->cancellation = unwind_exception;
```
libcfa/src/concurrency/invoke.c
```diff
@@ 29 @@
  // Called from the kernel when starting a coroutine or task so must switch back to user mode.
  
- extern struct $coroutine * __cfactx_cor_finish(void);
- extern void __cfactx_cor_leave ( struct $coroutine * );
+ extern struct coroutine$ * __cfactx_cor_finish(void);
+ extern void __cfactx_cor_leave ( struct coroutine$ * );
  extern void __cfactx_thrd_leave();
@@ 41 @@
  ) {
      // Finish setting up the coroutine by setting its state
-     struct $coroutine * cor = __cfactx_cor_finish();
+     struct coroutine$ * cor = __cfactx_cor_finish();
  
      // Call the main of the coroutine
@@ 70 @@
  }
  
- void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) __attribute__ ((__noreturn__));
- void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct $coroutine * cor) {
+ void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
+ void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
      _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
      printf("UNWIND ERROR %d after force unwind\n", ret);
@@ 100 @@
  void __cfactx_start(
      void (*main)(void *),
-     struct $coroutine * cor,
+     struct coroutine$ * cor,
      void *this,
      void (*invoke)(void *)
```
libcfa/src/concurrency/invoke.h
```diff
@@ 71 @@
  enum __Coroutine_State { Halted, Start, Primed, Blocked, Ready, Active, Cancelled, Halting };
  
- struct $coroutine {
+ struct coroutine$ {
      // context that is switch during a __cfactx_switch
      struct __stack_context_t context;
@@ 85 @@
  
      // first coroutine to resume this one
-     struct $coroutine * starter;
+     struct coroutine$ * starter;
  
      // last coroutine to resume this one
-     struct $coroutine * last;
+     struct coroutine$ * last;
  
      // If non-null stack must be unwound with this exception
@@ 95 @@
  };
  // Wrapper for gdb
- struct cfathread_coroutine_t { struct $coroutine debug; };
- 
- static inline struct __stack_t * __get_stack( struct $coroutine * cor ) {
+ struct cfathread_coroutine_t { struct coroutine$ debug; };
+ 
+ static inline struct __stack_t * __get_stack( struct coroutine$ * cor ) {
      return (struct __stack_t*)(((uintptr_t)cor->stack.storage) & ((uintptr_t)-2));
  }
@@ 110 @@
  };
  
- struct $monitor {
+ struct monitor$ {
      // spinlock to protect internal data
      struct __spinlock_t lock;
  
      // current owner of the monitor
-     struct $thread * owner;
+     struct thread$ * owner;
  
      // queue of threads that are blocked waiting for the monitor
-     __queue_t(struct $thread) entry_queue;
+     __queue_t(struct thread$) entry_queue;
  
      // stack of conditions to run next once we exit the monitor
@@ 133 @@
  };
  // Wrapper for gdb
- struct cfathread_monitor_t { struct $monitor debug; };
+ struct cfathread_monitor_t { struct monitor$ debug; };
  
  struct __monitor_group_t {
      // currently held monitors
-     __cfa_anonymous_object( __small_array_t($monitor*) );
+     __cfa_anonymous_object( __small_array_t(monitor$*) );
  
      // last function that acquired monitors
@@ 146 @@
  // instrusive link field for threads
  struct __thread_desc_link {
-     struct $thread * next;
+     struct thread$ * next;
      volatile unsigned long long ts;
  };
  
- struct $thread {
+ struct thread$ {
      // Core threading fields
      // context that is switch during a __cfactx_switch
@@ 179 @@
  
      // coroutine body used to store context
-     struct $coroutine self_cor;
+     struct coroutine$ self_cor;
  
      // current active context
-     struct $coroutine * curr_cor;
+     struct coroutine$ * curr_cor;
  
      // monitor body used for mutual exclusion
-     struct $monitor self_mon;
+     struct monitor$ self_mon;
  
      // pointer to monitor with sufficient lifetime for current monitors
-     struct $monitor * self_mon_p;
+     struct monitor$ * self_mon_p;
  
      // monitors currently held by this thread
@@ 195 @@
      // used to put threads on user data structures
      struct {
-         struct $thread * next;
-         struct $thread * back;
+         struct thread$ * next;
+         struct thread$ * back;
      } seqable;
  
      // used to put threads on dlist data structure
-     __cfa_dlink($thread);
+     __cfa_dlink(thread$);
  
      struct {
-         struct $thread * next;
-         struct $thread * prev;
+         struct thread$ * next;
+         struct thread$ * prev;
      } node;
@@ 214 @@
  };
  #ifdef __cforall
- P9_EMBEDDED( $thread, dlink($thread) )
+ P9_EMBEDDED( thread$, dlink(thread$) )
  #endif
  // Wrapper for gdb
- struct cfathread_thread_t { struct $thread debug; };
+ struct cfathread_thread_t { struct thread$ debug; };
  
  #ifdef __CFA_DEBUG__
- void __cfaabi_dbg_record_thrd($thread & this, bool park, const char prev_name[]);
+ void __cfaabi_dbg_record_thrd(thread$ & this, bool park, const char prev_name[]);
  #else
  #define __cfaabi_dbg_record_thrd(x, y, z)
@@ 228 @@
  extern "Cforall" {
  
-     static inline $thread *& get_next( $thread & this ) __attribute__((const)) {
+     static inline thread$ *& get_next( thread$ & this ) __attribute__((const)) {
          return this.link.next;
      }
  
-     static inline [$thread *&, $thread *& ] __get( $thread & this ) __attribute__((const)) {
+     static inline [thread$ *&, thread$ *& ] __get( thread$ & this ) __attribute__((const)) {
          return this.node.[next, prev];
      }
  
-     static inline $thread * volatile & ?`next ( $thread * this ) __attribute__((const)) {
+     static inline thread$ * volatile & ?`next ( thread$ * this ) __attribute__((const)) {
          return this->seqable.next;
      }
  
-     static inline $thread *& Back( $thread * this ) __attribute__((const)) {
+     static inline thread$ *& Back( thread$ * this ) __attribute__((const)) {
          return this->seqable.back;
      }
  
-     static inline $thread *& Next( $thread * this ) __attribute__((const)) {
+     static inline thread$ *& Next( thread$ * this ) __attribute__((const)) {
          return this->seqable.next;
      }
  
-     static inline bool listed( $thread * this ) {
+     static inline bool listed( thread$ * this ) {
          return this->seqable.next != 0p;
      }
@@ 258 @@
      }
  
-     static inline void ?{}(__monitor_group_t & this, struct $monitor ** data, __lock_size_t size, fptr_t func) {
+     static inline void ?{}(__monitor_group_t & this, struct monitor$ ** data, __lock_size_t size, fptr_t func) {
          (this.data){data};
          (this.size){size};
```
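The `monitor$` structure above (owner, entry queue, recursion count) is the descriptor the `monitor` keyword embeds and `mutex` parameters drive. An illustrative sketch, not part of this changeset (the `Aint` type is hypothetical):

```cfa
#include <fstream.hfa>
#include <monitor.hfa>

monitor Aint {                      // monitor keyword: embeds a monitor$ (self_mon)
    int cnt;
};

void ?{}( Aint & this ) { this.cnt = 0; }

void inc( Aint & mutex this ) {     // mutex parameter acquires the monitor on entry
    this.cnt += 1;
}                                   // monitor released on return

int get( Aint & mutex this ) { return this.cnt; }

int main() {
    Aint a;
    inc( a );
    sout | get( a );                // prints 1
}
```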
libcfa/src/concurrency/io.cfa
```diff
@@ 90 @@
  static inline unsigned __flush( struct $io_context & );
  static inline __u32 __release_sqes( struct $io_context & );
- extern void __kernel_unpark( $thread * thrd );
+ extern void __kernel_unpark( thread$ * thrd );
  
  bool __cfa_io_drain( processor * proc ) {
```
libcfa/src/concurrency/io/types.hfa
```diff
@@ 179 @@
  
  static inline {
-     $thread * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
+     thread$ * fulfil( io_future_t & this, __s32 result, bool do_unpark = true ) {
          this.result = result;
          return fulfil(this.self, do_unpark);
```
libcfa/src/concurrency/kernel.cfa
```diff
@@ 110 @@
  #endif
  
- extern $thread * mainThread;
+ extern thread$ * mainThread;
  extern processor * mainProcessor;
  
  //-----------------------------------------------------------------------------
  // Kernel Scheduling logic
- static $thread * __next_thread(cluster * this);
- static $thread * __next_thread_slow(cluster * this);
- static inline bool __must_unpark( $thread * thrd ) __attribute((nonnull(1)));
- static void __run_thread(processor * this, $thread * dst);
+ static thread$ * __next_thread(cluster * this);
+ static thread$ * __next_thread_slow(cluster * this);
+ static inline bool __must_unpark( thread$ * thrd ) __attribute((nonnull(1)));
+ static void __run_thread(processor * this, thread$ * dst);
  static void __wake_one(cluster * cltr);
@@ 181 @@
      __cfadbg_print_safe(runtime_core, "Kernel : core %p started\n", this);
  
-     $thread * readyThread = 0p;
+     thread$ * readyThread = 0p;
      MAIN_LOOP:
      for() {
@@ 388 @@
  // runThread runs a thread by context switching
  // from the processor coroutine to the target thread
- static void __run_thread(processor * this, $thread * thrd_dst) {
+ static void __run_thread(processor * this, thread$ * thrd_dst) {
      /* paranoid */ verify( ! __preemption_enabled() );
      /* paranoid */ verifyf( thrd_dst->state == Ready || thrd_dst->preempted != __NO_PREEMPTION, "state : %d, preempted %d\n", thrd_dst->state, thrd_dst->preempted);
@@ 396 @@
      __cfadbg_print_safe(runtime_core, "Kernel : core %p running thread %p (%s)\n", this, thrd_dst, thrd_dst->self_cor.name);
  
-     $coroutine * proc_cor = get_coroutine(this->runner);
+     coroutine$ * proc_cor = get_coroutine(this->runner);
  
      // set state of processor coroutine to inactive
@@ 415 @@
          /* paranoid */ verify( thrd_dst->context.SP );
          /* paranoid */ verify( thrd_dst->state != Halted );
-         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
-         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
+         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst ); // add escape condition if we are setting up the processor
+         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->curr_cor == proc_cor || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst ); // add escape condition if we are setting up the processor
          /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
@@ 428 @@
  
          /* paranoid */ verify( 0x0D15EA5E0D15EA5Ep == thrd_dst->canary );
-         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
-         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination $thread %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
+         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) > ((uintptr_t)__get_stack(thrd_dst->curr_cor)->limit) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_dst );
+         /* paranoid */ verifyf( ((uintptr_t)thrd_dst->context.SP) < ((uintptr_t)__get_stack(thrd_dst->curr_cor)->base ) || thrd_dst->corctx_flag, "ERROR : Destination thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_dst );
          /* paranoid */ verify( thrd_dst->context.SP );
          /* paranoid */ verify( thrd_dst->curr_cluster == this->cltr );
@@ 497 @@
  void returnToKernel() {
      /* paranoid */ verify( ! __preemption_enabled() );
-     $coroutine * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
-     $thread * thrd_src = kernelTLS().this_thread;
+     coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
+     thread$ * thrd_src = kernelTLS().this_thread;
  
      __STATS( thrd_src->last_proc = kernelTLS().this_processor; )
@@ 526 @@
  
      /* paranoid */ verify( ! __preemption_enabled() );
-     /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too small.\n", thrd_src );
-     /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning $thread %p has been corrupted.\n StackPointer too large.\n", thrd_src );
+     /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) < ((uintptr_t)__get_stack(thrd_src->curr_cor)->base ) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too small.\n", thrd_src );
+     /* paranoid */ verifyf( ((uintptr_t)thrd_src->context.SP) > ((uintptr_t)__get_stack(thrd_src->curr_cor)->limit) || thrd_src->corctx_flag, "ERROR : Returning thread$ %p has been corrupted.\n StackPointer too large.\n", thrd_src );
  }
@@ 533 @@
  // Scheduler routines
  // KERNEL ONLY
- static void __schedule_thread( $thread * thrd ) {
+ static void __schedule_thread( thread$ * thrd ) {
      /* paranoid */ verify( ! __preemption_enabled() );
      /* paranoid */ verify( ready_schedule_islocked());
@@ 583 @@
  }
  
- void schedule_thread$( $thread * thrd ) {
+ void schedule_thread$( thread$ * thrd ) {
      ready_schedule_lock();
      __schedule_thread( thrd );
@@ 590 @@
  
  // KERNEL ONLY
- static inline $thread * __next_thread(cluster * this) with( *this ) {
+ static inline thread$ * __next_thread(cluster * this) with( *this ) {
      /* paranoid */ verify( ! __preemption_enabled() );
  
      ready_schedule_lock();
-     $thread * thrd = pop_fast( this );
+     thread$ * thrd = pop_fast( this );
      ready_schedule_unlock();
@@ 602 @@
  
  // KERNEL ONLY
- static inline $thread * __next_thread_slow(cluster * this) with( *this ) {
+ static inline thread$ * __next_thread_slow(cluster * this) with( *this ) {
      /* paranoid */ verify( ! __preemption_enabled() );
  
      ready_schedule_lock();
-     $thread * thrd;
+     thread$ * thrd;
      for(25) {
          thrd = pop_slow( this );
@@ 620 @@
  }
  
- static inline bool __must_unpark( $thread * thrd ) {
+ static inline bool __must_unpark( thread$ * thrd ) {
      int old_ticket = __atomic_fetch_add(&thrd->ticket, 1, __ATOMIC_SEQ_CST);
      switch(old_ticket) {
@@ 636 @@
  }
  
- void __kernel_unpark( $thread * thrd ) {
+ void __kernel_unpark( thread$ * thrd ) {
      /* paranoid */ verify( ! __preemption_enabled() );
      /* paranoid */ verify( ready_schedule_islocked());
@@ 651 @@
  }
  
- void unpark( $thread * thrd ) {
+ void unpark( thread$ * thrd ) {
      if( !thrd ) return;
@@ 675 @@
  // Should never return
  void __cfactx_thrd_leave() {
-     $thread * thrd = active_thread();
-     $monitor * this = &thrd->self_mon;
+     thread$ * thrd = active_thread();
+     monitor$ * this = &thrd->self_mon;
  
      // Lock the monitor now
@@ 688 @@
      /* paranoid */ verify( kernelTLS().this_thread == thrd );
      /* paranoid */ verify( thrd->context.SP );
-     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
-     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
+     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
+     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
  
      thrd->state = Halting;
@@ 707 @@
  bool force_yield( __Preemption_Reason reason ) {
      __disable_interrupts_checked();
-     $thread * thrd = kernelTLS().this_thread;
+     thread$ * thrd = kernelTLS().this_thread;
      /* paranoid */ verify(thrd->state == Active);
@@ 819 @@
  //=============================================================================================
  void __kernel_abort_msg( char * abort_text, int abort_text_size ) {
-     $thread * thrd = __cfaabi_tls.this_thread;
+     thread$ * thrd = __cfaabi_tls.this_thread;
  
      if(thrd) {
```
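All of the scheduling code above traffics in `thread$ *`, the descriptor embedded by the `thread` keyword; user code never touches it directly. A minimal illustrative sketch of the user-level view (the `Hello` type is hypothetical, not part of this changeset):

```cfa
#include <thread.hfa>
#include <fstream.hfa>

thread Hello {};                    // thread keyword: embeds a thread$ descriptor

void main( Hello & ) {
    sout | "hello from a user thread";
}

int main() {
    processor p;                    // add a kernel thread for the scheduler
    {
        Hello h;                    // scheduled once construction completes
    }                               // implicit join at destruction
    sout | "joined";
}
```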
libcfa/src/concurrency/kernel.hfa
```diff
@@ 115 @@
      // it is not a particularly safe scheme as it can make processors less homogeneous
      struct {
-         $thread * thrd;
+         thread$ * thrd;
      } init;
@@ 215 @@
      // List of threads
      __spinlock_t thread_list_lock;
-     __dllist_t(struct $thread) threads;
+     __dllist_t(struct thread$) threads;
      unsigned int nthreads;
```
libcfa/src/concurrency/kernel/fwd.hfa
```diff
@@ 24 @@
  #endif
  
- struct $thread;
+ struct thread$;
  struct processor;
  struct cluster;
@@ 36 @@
  extern "Cforall" {
      extern __attribute__((aligned(128))) thread_local struct KernelThreadData {
-         struct $thread * volatile this_thread;
+         struct thread$ * volatile this_thread;
          struct processor * volatile this_processor;
          volatile bool sched_lock;
@@ 120 @@
      extern "Cforall" {
          extern void park( void );
-         extern void unpark( struct $thread * this );
-         static inline struct $thread * active_thread () {
-             struct $thread * t = publicTLS_get( this_thread );
+         extern void unpark( struct thread$ * this );
+         static inline struct thread$ * active_thread () {
+             struct thread$ * t = publicTLS_get( this_thread );
              /* paranoid */ verify( t );
              return t;
@@ 144 @@
          // Semaphore which only supports a single thread
          struct single_sem {
-             struct $thread * volatile ptr;
+             struct thread$ * volatile ptr;
          };
@@ 156 @@
              bool wait(single_sem & this) {
                  for() {
-                     struct $thread * expected = this.ptr;
+                     struct thread$ * expected = this.ptr;
                      if(expected == 1p) {
                          if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
@@ 175 @@
              bool post(single_sem & this) {
                  for() {
-                     struct $thread * expected = this.ptr;
+                     struct thread$ * expected = this.ptr;
                      if(expected == 1p) return false;
                      if(expected == 0p) {
@@ 200 @@
              // 1p     : fulfilled (wait won't block)
              // any thread : a thread is currently waiting
-             struct $thread * volatile ptr;
+             struct thread$ * volatile ptr;
          };
@@ 214 @@
              bool wait(oneshot & this) {
                  for() {
-                     struct $thread * expected = this.ptr;
+                     struct thread$ * expected = this.ptr;
                      if(expected == 1p) return false;
                      /* paranoid */ verify( expected == 0p );
@@ 227 @@
              // Mark as fulfilled, wake thread if needed
              // return true if a thread was unparked
-             $thread * post(oneshot & this, bool do_unpark = true) {
-                 struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
+             thread$ * post(oneshot & this, bool do_unpark = true) {
+                 struct thread$ * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
                  if( got == 0p ) return 0p;
                  if(do_unpark) unpark( got );
@@ 343 @@
              // from the server side, mark the future as fulfilled
              // delete it if needed
-             $thread * fulfil( future_t & this, bool do_unpark = true ) {
+             thread$ * fulfil( future_t & this, bool do_unpark = true ) {
                  for() {
                      struct oneshot * expected = this.ptr;
@@ 364 @@
                      if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                          if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return 0p; }
-                         $thread * ret = post( *expected, do_unpark );
+                         thread$ * ret = post( *expected, do_unpark );
                          __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
                          return ret;
```
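For context, `oneshot` is the runtime's internal one-time synchronization point: `wait` parks the caller unless `post` has already flipped the pointer to `1p`, and `post` hands back the parked `thread$ *`. A hedged sketch of that protocol, assuming `oneshot` and the `` `ms `` duration literal are visible through `thread.hfa` (the `Waiter` thread and the delay are illustrative, not part of this changeset):

```cfa
#include <thread.hfa>
#include <fstream.hfa>

oneshot flag;                        // one-time synchronization point

thread Waiter {};
void main( Waiter & ) {
    wait( flag );                    // parks unless post() already happened
    sout | "fulfilled";
}

int main() {
    Waiter w;
    sleep( 100`ms );                 // illustrative delay so the waiter parks first
    post( flag );                    // marks fulfilled and unparks the waiter
}
```

Either ordering is safe: if `post` wins the race, the later `wait` sees `1p` and returns without blocking.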
libcfa/src/concurrency/kernel/startup.cfa
```diff
@@ 77 @@
  static void __kernel_first_resume( processor * this );
  static void __kernel_last_resume ( processor * this );
- static void init(processor & this, const char name[], cluster & _cltr, $thread * initT);
+ static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT);
  static void deinit(processor & this);
  static void doregister( struct cluster & cltr );
@@ 83 @@
  static void register_tls( processor * this );
  static void unregister_tls( processor * this );
- static void ?{}( $coroutine & this, current_stack_info_t * info);
- static void ?{}( $thread & this, current_stack_info_t * info);
+ static void ?{}( coroutine$ & this, current_stack_info_t * info);
+ static void ?{}( thread$ & this, current_stack_info_t * info);
  static void ?{}(processorCtx_t & this) {}
  static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info);
@@ 105 @@
  KERNEL_STORAGE(cluster,              mainCluster);
  KERNEL_STORAGE(processor,            mainProcessor);
- KERNEL_STORAGE($thread,              mainThread);
+ KERNEL_STORAGE(thread$,              mainThread);
  KERNEL_STORAGE(__stack_t,            mainThreadCtx);
  KERNEL_STORAGE(__scheduler_RWLock_t, __scheduler_lock);
@@ 114 @@
  cluster              * mainCluster;
  processor            * mainProcessor;
- $thread              * mainThread;
+ thread$              * mainThread;
  __scheduler_RWLock_t * __scheduler_lock;
@@ 203 @@
      // SKULLDUGGERY: the mainThread steals the process main thread
      // which will then be scheduled by the mainProcessor normally
-     mainThread = ($thread *)&storage_mainThread;
+     mainThread = (thread$ *)&storage_mainThread;
      current_stack_info_t info;
      info.storage = (__stack_t*)&storage_mainThreadCtx;
@@ 397 @@
  
  static void __kernel_first_resume( processor * this ) {
-     $thread * src = mainThread;
-     $coroutine * dst = get_coroutine(this->runner);
+     thread$ * src = mainThread;
+     coroutine$ * dst = get_coroutine(this->runner);
  
      /* paranoid */ verify( ! __preemption_enabled() );
@@ 430 @@
  // KERNEL_ONLY
  static void __kernel_last_resume( processor * this ) {
-     $coroutine * src = &mainThread->self_cor;
-     $coroutine * dst = get_coroutine(this->runner);
+     coroutine$ * src = &mainThread->self_cor;
+     coroutine$ * dst = get_coroutine(this->runner);
  
      /* paranoid */ verify( ! __preemption_enabled() );
@@ 459 @@
  //-----------------------------------------------------------------------------
  // Main thread construction
- static void ?{}( $coroutine & this, current_stack_info_t * info) with( this ) {
+ static void ?{}( coroutine$ & this, current_stack_info_t * info) with( this ) {
      stack.storage = info->storage;
      with(*stack.storage) {
@@ 474 @@
  }
  
- static void ?{}( $thread & this, current_stack_info_t * info) with( this ) {
+ static void ?{}( thread$ & this, current_stack_info_t * info) with( this ) {
      ticket = TICKET_RUNNING;
      state = Start;
@@ 506 @@
  }
  
- static void init(processor & this, const char name[], cluster & _cltr, $thread * initT) with( this ) {
+ static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT) with( this ) {
      this.name = name;
      this.cltr = &_cltr;
@@ 545 @@
  }
  
- void ?{}(processor & this, const char name[], cluster & _cltr, $thread * initT) {
+ void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
      ( this.terminated ){};
      ( this.runner ){};
@@ 663 @@
  }
  
- void doregister( cluster * cltr, $thread & thrd ) {
+ void doregister( cluster * cltr, thread$ & thrd ) {
      lock (cltr->thread_list_lock __cfaabi_dbg_ctx2);
      cltr->nthreads += 1;
@@ 670 @@
  }
  
- void unregister( cluster * cltr, $thread & thrd ) {
+ void unregister( cluster * cltr, thread$ & thrd ) {
      lock (cltr->thread_list_lock __cfaabi_dbg_ctx2);
      remove(cltr->threads, thrd );
```
libcfa/src/concurrency/kernel_private.hfa
```diff
@@ 46 @@
  }
  
- void schedule_thread$( $thread * ) __attribute__((nonnull (1)));
+ void schedule_thread$( thread$ * ) __attribute__((nonnull (1)));
  
  extern bool __preemption_enabled();
  
  //release/wake-up the following resources
- void __thread_finish( $thread * thrd );
+ void __thread_finish( thread$ * thrd );
  
  //-----------------------------------------------------------------------------
@@ 95 @@
  
  __cfaabi_dbg_debug_do(
-     extern void __cfaabi_dbg_thread_register  ( $thread * thrd );
-     extern void __cfaabi_dbg_thread_unregister( $thread * thrd );
+     extern void __cfaabi_dbg_thread_register  ( thread$ * thrd );
+     extern void __cfaabi_dbg_thread_unregister( thread$ * thrd );
  )
@@ 105 @@
  //-----------------------------------------------------------------------------
  // Utils
- void doregister( struct cluster * cltr, struct $thread & thrd );
- void unregister( struct cluster * cltr, struct $thread & thrd );
+ void doregister( struct cluster * cltr, struct thread$ & thrd );
+ void unregister( struct cluster * cltr, struct thread$ & thrd );
  
  //-----------------------------------------------------------------------------
@@ 300 @@
  // push thread onto a ready queue for a cluster
  // returns true if the list was previously empty, false otherwise
- __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool local);
+ __attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool local);
  
  //-----------------------------------------------------------------------
@@ 306 @@
  // returns 0p if empty
  // May return 0p spuriously
- __attribute__((hot)) struct $thread * pop_fast(struct cluster * cltr);
+ __attribute__((hot)) struct thread$ * pop_fast(struct cluster * cltr);
  
  //-----------------------------------------------------------------------
@@ 312 @@
  // returns 0p if empty
  // May return 0p spuriously
- __attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr);
+ __attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr);
  
  //-----------------------------------------------------------------------
@@ 318 @@
  // returns 0p if empty
  // guaranteed to find any threads added before this call
- __attribute__((hot)) struct $thread * pop_search(struct cluster * cltr);
+ __attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr);
  
  //-----------------------------------------------------------------------
```
libcfa/src/concurrency/locks.cfa
```diff
@@ 32 @@
  
      // waiting thread
-     struct $thread * t;
+     struct thread$ * t;
  
      // shadow field
@@ 45 @@
  P9_EMBEDDED( info_thread(L), dlink(info_thread(L)) )
  
- void ?{}( info_thread(L) & this, $thread * t, uintptr_t info, L * l ) {
+ void ?{}( info_thread(L) & this, thread$ * t, uintptr_t info, L * l ) {
      this.t = t;
      this.info = info;
@@ 71 @@
  void lock( blocking_lock & this ) with( this ) {
      lock( lock __cfaabi_dbg_ctx2 );
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();
  
      // single acquisition lock is held by current thread
@@ 117 @@
  
  void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
-     $thread * t = &try_pop_front( blocked_threads );
+     thread$ * t = &try_pop_front( blocked_threads );
      owner = t;
      recursion_count = ( t ? 1 : 0 );
@@ 142 @@
  }
  
- void on_notify( blocking_lock & this, $thread * t ) with( this ) {
+ void on_notify( blocking_lock & this, thread$ * t ) with( this ) {
      lock( lock __cfaabi_dbg_ctx2 );
      // lock held
@@ 366 @@
  }
  
- $thread * V (semaphore & this, const bool doUnpark ) with( this ) {
-     $thread * thrd = 0p;
+ thread$ * V (semaphore & this, const bool doUnpark ) with( this ) {
+     thread$ * thrd = 0p;
      lock( lock __cfaabi_dbg_ctx2 );
      count += 1;
@@ 384 @@
  
  bool V(semaphore & this) with( this ) {
-     $thread * thrd = V(this, true);
+     thread$ * thrd = V(this, true);
      return thrd != 0p;
  }
  
  bool V(semaphore & this, unsigned diff) with( this ) {
-     $thread * thrd = 0p;
+     thread$ * thrd = 0p;
      lock( lock __cfaabi_dbg_ctx2 );
      int release = max(-count, (int)diff);
```
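The `V(semaphore &, bool)` variant above returns the unparked `thread$ *` rather than a flag, so callers can defer the wake-up. A small illustrative sketch of the public `P`/`V` pair, assuming the `semaphore` constructor takes an initial count (the `Worker` thread is hypothetical, not part of this changeset):

```cfa
#include <locks.hfa>
#include <thread.hfa>
#include <fstream.hfa>

semaphore sem = { 0 };               // counting semaphore, initially 0

thread Worker {};
void main( Worker & ) {
    P( sem );                        // blocks while the count is 0
    sout | "released";
}

int main() {
    Worker w;
    V( sem );                        // increments the count and unparks a waiter
}
```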
libcfa/src/concurrency/locks.hfa
```diff
@@ 39 @@
  struct Semaphore0nary {
      __spinlock_t lock; // needed to protect
-     mpsc_queue($thread) queue;
- };
- 
- static inline bool P(Semaphore0nary & this, $thread * thrd) {
+     mpsc_queue(thread$) queue;
+ };
+ 
+ static inline bool P(Semaphore0nary & this, thread$ * thrd) {
      /* paranoid */ verify(!thrd`next);
      /* paranoid */ verify(!(&(*thrd)`next));
@@ 51 @@
  
  static inline bool P(Semaphore0nary & this) {
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();
      P(this, thrd);
      park();
  }
  
- static inline $thread * V(Semaphore0nary & this, bool doUnpark = true) {
-     $thread * next;
+ static inline thread$ * V(Semaphore0nary & this, bool doUnpark = true) {
+     thread$ * next;
      lock(this.lock __cfaabi_dbg_ctx2);
      for (;;) {
@@ 124 @@
  static inline bool P(ThreadBenaphore & this, bool wait) { return wait ? P(this) : tryP(this); }
  
- static inline $thread * V(ThreadBenaphore & this, bool doUnpark = true) {
+ static inline thread$ * V(ThreadBenaphore & this, bool doUnpark = true) {
      if (V(this.ben)) return 0p;
      return V(this.sem, doUnpark);
@@ 134 @@
      __spinlock_t lock;
      int count;
-     __queue_t($thread) waiting;
+     __queue_t(thread$) waiting;
  };
@@ 142 @@
  bool   V (semaphore & this);
  bool   V (semaphore & this, unsigned count);
- $thread * V (semaphore & this, bool );
+ thread$ * V (semaphore & this, bool );
  
  //----------
@@ 156 @@
  static inline size_t on_wait  ( single_acquisition_lock & this ) { return on_wait ( (blocking_lock &)this ); }
  static inline void   on_wakeup( single_acquisition_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
- static inline void   on_notify( single_acquisition_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
+ static inline void   on_notify( single_acquisition_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
  
  //----------
@@ 170 @@
  static inline size_t on_wait  ( owner_lock & this ) { return on_wait ( (blocking_lock &)this ); }
  static inline void   on_wakeup( owner_lock & this, size_t v ) { on_wakeup ( (blocking_lock &)this, v ); }
- static inline void   on_notify( owner_lock & this, struct $thread * t ) { on_notify( (blocking_lock &)this, t ); }
+ static inline void   on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
  
  struct fast_lock {
-     $thread * volatile owner;
+     thread$ * volatile owner;
      ThreadBenaphore sem;
  };
@@ 179 @@
  static inline void ?{}(fast_lock & this) { this.owner = 0p; }
  
- static inline bool $try_lock(fast_lock & this, $thread * thrd) {
-     $thread * exp = 0p;
+ static inline bool $try_lock(fast_lock & this, thread$ * thrd) {
+     thread$ * exp = 0p;
      return __atomic_compare_exchange_n(&this.owner, &exp, thrd, false, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
  }
  
  static inline void lock( fast_lock & this ) __attribute__((artificial));
  static inline void lock( fast_lock & this ) {
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();
      /* paranoid */verify(thrd != this.owner);
@@ 197 @@
  static inline bool try_lock( fast_lock & this ) __attribute__((artificial));
  static inline bool try_lock ( fast_lock & this ) {
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();
      /* paranoid */ verify(thrd != this.owner);
      return $try_lock(this, thrd);
  }
  
- static inline $thread * unlock( fast_lock & this ) __attribute__((artificial));
- static inline $thread * unlock( fast_lock & this ) {
+ static inline thread$ * unlock( fast_lock & this ) __attribute__((artificial));
+ static inline thread$ * unlock( fast_lock & this ) {
      /* paranoid */ verify(active_thread() == this.owner);
@@ 216 @@
  static inline size_t on_wait( fast_lock & this ) { unlock(this); return 0; }
  static inline void on_wakeup( fast_lock & this, size_t ) { lock(this); }
- static inline void on_notify( fast_lock &, struct $thread * t ) { unpark(t); }
+ static inline void on_notify( fast_lock &, struct thread$ * t ) { unpark(t); }
  
  struct mcs_node {
@@ 248 @@
  
      // Current thread owning the lock
-     struct $thread * owner;
+     struct thread$ * owner;
  
      // List of blocked threads
-     dlist( $thread ) blocked_threads;
+     dlist( thread$ ) blocked_threads;
  
      // Used for comparing and exchanging
@@ 342 @@
      // block until signalled
      while (block(this)) if(try_lock_contention(this)) return true;
- 	
+ 
      // this should never be reached as block(this) always returns true
      return false;
@@ 384 @@
      // block until signalled
      while (block(this)) if(try_lock_contention(this)) return true;
- 	
+ 
      // this should never be reached as block(this) always returns true
      return false;
@@ 394 @@
      if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
      lock( spinlock __cfaabi_dbg_ctx2 );
-     $thread * t = &try_pop_front( blocked_threads );
+     thread$ * t = &try_pop_front( blocked_threads );
      unlock( spinlock );
      unpark( t );
  }
  
- static inline void on_notify(linear_backoff_then_block_lock & this, struct $thread * t ) { unpark(t); }
+ static inline void on_notify(linear_backoff_then_block_lock & this, struct thread$ * t ) { unpark(t); }
  static inline size_t on_wait(linear_backoff_then_block_lock & this) { unlock(this); return 0; }
  static inline void on_wakeup(linear_backoff_then_block_lock & this, size_t recursion ) { lock_improved(this); }
@@ 407 @@
  trait is_blocking_lock(L & | sized(L)) {
      // For synchronization locks to use when acquiring
-     void on_notify( L &, struct $thread * );
+     void on_notify( L &, struct thread$ * );
  
      // For synchronization locks to use when releasing
@@ 441 @@
      int count;
  };
- 	
+ 
  void ?{}( condition_variable(L) & this );
```
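The `on_notify`/`on_wait`/`on_wakeup` hooks above are the `is_blocking_lock` trait entry points that `condition_variable(L)` calls to release and reacquire the underlying lock around a block. An illustrative sketch, assuming the `wait(cv, lock)` and `notify_one` entry points from `locks.hfa` (the `Waiter` thread and the delay are hypothetical, not part of this changeset):

```cfa
#include <locks.hfa>
#include <thread.hfa>
#include <fstream.hfa>

single_acquisition_lock l;
condition_variable( single_acquisition_lock ) cv;

thread Waiter {};
void main( Waiter & ) {
    lock( l );
    wait( cv, l );                   // on_wait releases l; on_wakeup reacquires it
    unlock( l );
    sout | "notified";
}

int main() {
    Waiter w;
    sleep( 100`ms );                 // illustrative: let the waiter block first
    lock( l );
    notify_one( cv );                // on_notify unparks the waiting thread
    unlock( l );
}
```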
libcfa/src/concurrency/monitor.cfa
```diff
@@ 5 @@
  // file "LICENCE" distributed with Cforall.
  //
- // $monitor.c --
+ // monitor.cfa --
  //
  // Author           : Thierry Delisle
@@ 28 @@
  //-----------------------------------------------------------------------------
  // Forward declarations
- static inline void __set_owner ( $monitor * this, $thread * owner );
- static inline void __set_owner ( $monitor * storage [], __lock_size_t count, $thread * owner );
- static inline void set_mask  ( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
- static inline void reset_mask( $monitor * this );
- 
- static inline $thread * next_thread( $monitor * this );
- static inline bool is_accepted( $monitor * this, const __monitor_group_t & monitors );
+ static inline void __set_owner ( monitor$ * this, thread$ * owner );
+ static inline void __set_owner ( monitor$ * storage [], __lock_size_t count, thread$ * owner );
+ static inline void set_mask  ( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask );
+ static inline void reset_mask( monitor$ * this );
+ 
+ static inline thread$ * next_thread( monitor$ * this );
+ static inline bool is_accepted( monitor$ * this, const __monitor_group_t & monitors );
  
  static inline void lock_all  ( __spinlock_t * locks [], __lock_size_t count );
- static inline void lock_all  ( $monitor * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
+ static inline void lock_all  ( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count );
  static inline void unlock_all( __spinlock_t * locks [], __lock_size_t count );
- static inline void unlock_all( $monitor * locks [], __lock_size_t count );
- 
- static inline void save   ( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
- static inline void restore( $monitor * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
- 
- static inline void init     ( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
- static inline void init_push( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
- 
- static inline $thread * check_condition   ( __condition_criterion_t * );
+ static inline void unlock_all( monitor$ * locks [], __lock_size_t count );
+ 
+ static inline void save   ( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*out*/ recursions [], __waitfor_mask_t /*out*/ masks [] );
+ static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
+ 
+ static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+ static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
+ 
+ static inline thread$ * check_condition   ( __condition_criterion_t * );
  static inline void brand_condition        ( condition & );
- static inline [$thread *, int] search_entry_queue( const __waitfor_mask_t &, $monitor * monitors [], __lock_size_t count );
+ static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t &, monitor$ * monitors [], __lock_size_t count );
  
  forall(T & | sized( T ))
  static inline __lock_size_t insert_unique( T * array [], __lock_size_t & size, T * val );
  static inline __lock_size_t count_max    ( const __waitfor_mask_t & mask );
- static inline __lock_size_t aggregate    ( $monitor * storage [], const __waitfor_mask_t & mask );
+ static inline __lock_size_t aggregate    ( monitor$ * storage [], const __waitfor_mask_t & mask );
  
  //-----------------------------------------------------------------------------
@@ 69 @@
  
  #define monitor_ctx( mons, cnt )        /* Define that create the necessary struct for internal/external scheduling operations */ \
-     $monitor ** monitors = mons;        /* Save the targeted monitors */ \
+     monitor$ ** monitors = mons;        /* Save the targeted monitors */ \
      __lock_size_t count = cnt;          /* Save the count to a local variable */ \
      unsigned int recursions[ count ];   /* Save the current recursion levels to restore them later */ \
@@ 82 @@
  // Enter/Leave routines
  // Enter single monitor
- static void __enter( $monitor * this, const __monitor_group_t & group ) {
-     $thread * thrd = active_thread();
+ static void __enter( monitor$ * this, const __monitor_group_t & group ) {
+     thread$ * thrd = active_thread();
  
      // Lock the monitor spinlock
@@ 141 @@
  }
  
- static void __dtor_enter( $monitor * this, fptr_t func, bool join ) {
-     $thread * thrd = active_thread();
+ static void __dtor_enter( monitor$ * this, fptr_t func, bool join ) {
+     thread$ * thrd = active_thread();
      #if defined( __CFA_WITH_VERIFY__ )
          bool is_thrd = this == &thrd->self_mon;
@@ 173 @@
      // because join will not release the monitor after it executed.
      // to avoid that it sets the owner to the special value thrd | 1p before exiting
-     else if( this->owner == ($thread*)(1 | (uintptr_t)thrd) ) {
+     else if( this->owner == (thread$*)(1 | (uintptr_t)thrd) ) {
          // restore the owner and just return
          __cfaabi_dbg_print_safe( "Kernel : Destroying free mon %p\n", this);
@@ 191 @@
  
      __lock_size_t count = 1;
-     $monitor ** monitors = &this;
+     monitor$ ** monitors = &this;
      __monitor_group_t group = { &this, 1, func };
      if( is_accepted( this, group) ) {
@@ 243 @@
  
  // Leave single monitor
- void __leave( $monitor * this ) {
+ void __leave( monitor$ * this ) {
      // Lock the monitor spinlock
      lock( this->lock __cfaabi_dbg_ctx2 );
@@ 263 @@
  
      // Get the next thread, will be null on low contention monitor
-     $thread * new_owner = next_thread( this );
+     thread$ * new_owner = next_thread( this );
  
      // Check the new owner is consistent with who we wake-up
@@ 278 @@
  
  // Leave single monitor for the last time
- void __dtor_leave( $monitor * this, bool join ) {
+ void __dtor_leave( monitor$ * this, bool join ) {
      __cfaabi_dbg_debug_do(
          if( active_thread() != this->owner ) {
@@ 288 @@
      )
  
-     this->owner = ($thread*)(1 | (uintptr_t)this->owner);
- }
- 
- void __thread_finish( $thread * thrd ) {
-     $monitor * this = &thrd->self_mon;
+     this->owner = (thread$*)(1 | (uintptr_t)this->owner);
+ }
+ 
+ void __thread_finish( thread$ * thrd ) {
+     monitor$ * this = &thrd->self_mon;
  
      // Lock the monitor now
@@ 298 @@
      /* paranoid */ verify( this->lock.lock );
      /* paranoid */ verify( thrd->context.SP );
-     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : $thread %p has been corrupted.\n StackPointer too large.\n", thrd );
-     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : $thread %p has been corrupted.\n StackPointer too small.\n", thrd );
+     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) > ((uintptr_t)__get_stack(thrd->curr_cor)->limit), "ERROR : thread$ %p has been corrupted.\n StackPointer too large.\n", thrd );
+     /* paranoid */ verifyf( ((uintptr_t)thrd->context.SP) < ((uintptr_t)__get_stack(thrd->curr_cor)->base ), "ERROR : thread$ %p has been corrupted.\n StackPointer too small.\n", thrd );
      /* paranoid */ verify( ! __preemption_enabled() );
@@ 311 @@
  
      // Fetch the next thread, can be null
-     $thread * new_owner = next_thread( this );
+     thread$ * new_owner = next_thread( this );
  
      // Mark the state as fully halted
@@ 336 @@
  // Leave multiple monitor
  // relies on the monitor array being sorted
- static inline void leave($monitor * monitors [], __lock_size_t count) {
+ static inline void leave(monitor$ * monitors [], __lock_size_t count) {
      for( __lock_size_t i = count - 1; i >= 0; i--) {
          __leave( monitors[i] );
@@ 344 @@
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_guard_t & this, $monitor * m [], __lock_size_t count, fptr_t func ) {
-     $thread * thrd = active_thread();
+ void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+     thread$ * thrd = active_thread();
  
      // Store current array
@@ 385 @@
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_dtor_guard_t & this, $monitor * m [], fptr_t func, bool join ) {
+ void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
      // optimization
-     $thread * thrd = active_thread();
+     thread$ * thrd = active_thread();
  
      // Store current array
@@ 415 @@
  //-----------------------------------------------------------------------------
  // Internal scheduling types
- void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+ void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
      this.waiting_thread = waiting_thread;
      this.count = count;
@@ 429 @@
  }
  
- void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t & owner ) {
+ void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
      this.ready  = false;
      this.target = target;
@@ 463 @@
      // Find the next thread(s) to run
      __lock_size_t thread_count = 0;
-     $thread * threads[ count ];
+     thread$ * threads[ count ];
      __builtin_memset( threads, 0, sizeof( threads ) );
@@ 471 @@
      // Remove any duplicate threads
      for( __lock_size_t i = 0; i < count; i++) {
-         $thread * new_owner = next_thread( monitors[i] );
+         thread$ * new_owner = next_thread( monitors[i] );
          insert_unique( threads, thread_count, new_owner );
      }
@@ 501 @@
      //Some more checking in debug
      __cfaabi_dbg_debug_do(
-         $thread * this_thrd = active_thread();
+         thread$ * this_thrd = active_thread();
          if ( this.monitor_count != this_thrd->monitors.size ) {
              abort( "Signal on condition %p made with different number of monitor(s), expected %zi got %zi", &this, this.monitor_count, this_thrd->monitors.size );
@@ 555 @@
  
      //Find the thread to run
-     $thread * signallee = pop_head( this.blocked )->waiting_thread;
+     thread$ * signallee = pop_head( this.blocked )->waiting_thread;
      __set_owner( monitors, count, signallee );
@@ 608 @@
      // Create one!
      __lock_size_t max = count_max( mask );
-     $monitor * mon_storage[max];
+     monitor$ * mon_storage[max];
      __builtin_memset( mon_storage, 0, sizeof( mon_storage ) );
      __lock_size_t actual_count = aggregate( mon_storage, mask );
@@ 626 @@
      {
          // Check if the entry queue
-         $thread * next; int index;
+         thread$ * next; int index;
          [next, index] = search_entry_queue( mask, monitors, count );
@@ 636 @@
          verifyf( accepted.size == 1, "ERROR: Accepted dtor has more than 1 mutex parameter." );
  
-         $monitor * mon2dtor = accepted[0];
+         monitor$ * mon2dtor = accepted[0];
          verifyf( mon2dtor->dtor_node, "ERROR: Accepted monitor has no dtor_node." );
@@ 730 @@
  // Utilities
  
- static inline void __set_owner( $monitor * this, $thread * owner ) {
+ static inline void __set_owner( monitor$ * this, thread$ * owner ) {
      /* paranoid */ verify( this->lock.lock );
@@ 740 @@
  }
  
- static inline void __set_owner( $monitor * monitors [], __lock_size_t count, $thread * owner ) {
+ static inline void __set_owner( monitor$ * monitors [], __lock_size_t count, thread$ * owner ) {
      /* paranoid */ verify ( monitors[0]->lock.lock );
      /* paranoid */ verifyf( monitors[0]->owner == active_thread(), "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), monitors[0]->owner, monitors[0]->recursion, monitors[0] );
@@ 753 @@
  }
  
- static inline void set_mask( $monitor * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
+ static inline void set_mask( monitor$ * storage [], __lock_size_t count, const __waitfor_mask_t & mask ) {
      for( __lock_size_t i = 0; i < count; i++) {
          storage[i]->mask = mask;
      }
  }
  
- static inline void reset_mask( $monitor * this ) {
+ static inline void reset_mask( monitor$ * this ) {
      this->mask.accepted = 0p;
      this->mask.data = 0p;
@@ 767 @@
- static inline $thread * next_thread( $monitor * this ) {
+ static inline thread$ * next_thread( monitor$ * this ) {
      //Check the signaller stack
      __cfaabi_dbg_print_safe( "Kernel :  mon %p AS-stack top %p\n", this, this->signal_stack.top);
@@ 781 @@
      // No signaller thread
      // Get the next thread in the entry_queue
-     $thread * new_owner = pop_head( this->entry_queue );
+     thread$ * new_owner = pop_head( this->entry_queue );
      /* paranoid */ verifyf( !this->owner || active_thread() == this->owner, "Expected owner to be %p, got %p (r: %i, m: %p)", active_thread(), this->owner, this->recursion, this );
      /* paranoid */ verify( !new_owner || new_owner->link.next == 0p );
@@ 789 @@
  }
  
- static inline bool is_accepted( $monitor * this, const __monitor_group_t & group ) {
+ static inline bool is_accepted( monitor$ * this, const __monitor_group_t & group ) {
      __acceptable_t * it = this->mask.data; // Optim
      __lock_size_t count = this->mask.size;
@@ 811 @@
  }
  
- static inline void init( __lock_size_t count, $monitor * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
+ static inline void init( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {
      for( __lock_size_t i = 0; i < count; i++) {
          (criteria[i]){ monitors[i], waiter
```
}; … … 819 819 } 820 820 821 static inline void init_push( __lock_size_t count, $monitor* monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) {821 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] ) { 822 822 for( __lock_size_t i = 0; i < count; i++) { 823 823 (criteria[i]){ monitors[i], waiter }; … … 835 835 } 836 836 837 static inline void lock_all( $monitor* source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) {837 static inline void lock_all( monitor$ * source [], __spinlock_t * /*out*/ locks [], __lock_size_t count ) { 838 838 for( __lock_size_t i = 0; i < count; i++ ) { 839 839 __spinlock_t * l = &source[i]->lock; … … 849 849 } 850 850 851 static inline void unlock_all( $monitor* locks [], __lock_size_t count ) {851 static inline void unlock_all( monitor$ * locks [], __lock_size_t count ) { 852 852 for( __lock_size_t i = 0; i < count; i++ ) { 853 853 unlock( locks[i]->lock ); … … 856 856 857 857 static inline void save( 858 $monitor* ctx [],858 monitor$ * ctx [], 859 859 __lock_size_t count, 860 860 __attribute((unused)) __spinlock_t * locks [], … … 869 869 870 870 static inline void restore( 871 $monitor* ctx [],871 monitor$ * ctx [], 872 872 __lock_size_t count, 873 873 __spinlock_t * locks [], … … 887 887 // 2 - Checks if all the monitors are ready to run 888 888 // if so return the thread to run 889 static inline $thread* check_condition( __condition_criterion_t * target ) {889 static inline thread$ * check_condition( __condition_criterion_t * target ) { 890 890 __condition_node_t * node = target->owner; 891 891 unsigned short count = node->count; … … 910 910 911 911 static inline void brand_condition( condition & this ) { 912 $thread* thrd = active_thread();912 thread$ * thrd = active_thread(); 913 913 if( !this.monitors ) { 914 914 // __cfaabi_dbg_print_safe( "Branding\n" ); … … 916 916 this.monitor_count = thrd->monitors.size; 917 917 918 this.monitors = ( $monitor**)malloc( this.monitor_count * sizeof( *this.monitors ) );918 this.monitors = (monitor$ **)malloc( this.monitor_count * sizeof( *this.monitors ) ); 919 919 for( int i = 0; i < this.monitor_count; i++ ) { 920 920 this.monitors[i] = thrd->monitors[i]; … … 923 923 } 924 924 925 static inline [ $thread *, int] search_entry_queue( const __waitfor_mask_t & mask, $monitor* monitors [], __lock_size_t count ) {926 927 __queue_t( $thread) & entry_queue = monitors[0]->entry_queue;925 static inline [thread$ *, int] search_entry_queue( const __waitfor_mask_t & mask, monitor$ * monitors [], __lock_size_t count ) { 926 927 __queue_t(thread$) & entry_queue = monitors[0]->entry_queue; 928 928 929 929 // For each thread in the entry-queue 930 for( $thread** thrd_it = &entry_queue.head;930 for( thread$ ** thrd_it = &entry_queue.head; 931 931 (*thrd_it) != 1p; 932 932 thrd_it = &(*thrd_it)->link.next … … 972 972 } 973 973 974 static inline __lock_size_t aggregate( $monitor* storage [], const __waitfor_mask_t & mask ) {974 static inline __lock_size_t aggregate( monitor$ * storage [], const __waitfor_mask_t & mask ) { 975 975 __lock_size_t size = 0; 976 976 for( __lock_size_t i = 0; i < mask.size; i++ ) { -
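Note: the wait()/signal()/brand_condition() machinery above is what backs CFA's user-level internal scheduling. As a usage sketch, a minimal single-slot buffer monitor; the type and routine names are illustrative and not part of this changeset:

	#include <monitor.hfa>

	monitor buffer {
		condition not_full, not_empty;
		int  elem;
		bool full;
	};
	void ?{}( buffer & this ) { this.full = false; }   // remaining fields default-construct

	void put( buffer & mutex this, int v ) with( this ) {
		if ( full ) wait( not_full );              // first wait brands not_full to this monitor
		elem = v; full = true;
		signal( not_empty );                       // signallee receives ownership directly (see __set_owner above)
	}

	int take( buffer & mutex this ) with( this ) {
		if ( ! full ) wait( not_empty );
		full = false;
		signal( not_full );
		return elem;
	}

Because the signalled thread is handed the monitor directly (baton passing), the condition re-check can be an if rather than a while.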
libcfa/src/concurrency/monitor.hfa
ree23a8d re84ab3d

 trait is_monitor(T &) {
-	$monitor * get_monitor( T & );
+	monitor$ * get_monitor( T & );
 	void ^?{}( T & mutex );
 };

-static inline void ?{}($monitor & this) with( this ) {
+static inline void ?{}(monitor$ & this) with( this ) {
 	lock{};
 	entry_queue{};
…
 }

-static inline void ^?{}($monitor & ) {}
+static inline void ^?{}(monitor$ & ) {}

 struct monitor_guard_t {
-	$monitor ** m;
+	monitor$ ** m;
 	__lock_size_t count;
 	__monitor_group_t prev;
 };

-void ?{}( monitor_guard_t & this, $monitor ** m, __lock_size_t count, void (*func)() );
+void ?{}( monitor_guard_t & this, monitor$ ** m, __lock_size_t count, void (*func)() );
 void ^?{}( monitor_guard_t & this );

 struct monitor_dtor_guard_t {
-	$monitor * m;
+	monitor$ * m;
 	__monitor_group_t prev;
 	bool join;
 };

-void ?{}( monitor_dtor_guard_t & this, $monitor ** m, void (*func)(), bool join );
+void ?{}( monitor_dtor_guard_t & this, monitor$ ** m, void (*func)(), bool join );
 void ^?{}( monitor_dtor_guard_t & this );
…
 	// The monitor this criterion concerns
-	$monitor * target;
+	monitor$ * target;

 	// The parent node to which this criterion belongs
…
 struct __condition_node_t {
 	// Thread that needs to be woken when all criteria are met
-	$thread * waiting_thread;
+	thread$ * waiting_thread;

 	// Array of criteria (Criterions are contiguous in memory)
…
 }

-void ?{}(__condition_node_t & this, $thread * waiting_thread, __lock_size_t count, uintptr_t user_info );
+void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
 void ?{}(__condition_criterion_t & this );
-void ?{}(__condition_criterion_t & this, $monitor * target, __condition_node_t * owner );
+void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );

 struct condition {
…
 	// Array of monitor pointers (Monitors are NOT contiguous in memory)
-	$monitor ** monitors;
+	monitor$ ** monitors;

 	// Number of monitors in the array
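Note: the is_monitor trait above is the only contract a mutex parameter needs. A hand-written type satisfying it, mirroring what the monitor keyword generates (see the Keywords.cc hunks below); my_mon and its field name are hypothetical:

	#include <monitor.hfa>

	struct my_mon {
		monitor$ __mon_d;                       // embedded kernel monitor, default-constructed
	};
	static inline monitor$ * get_monitor( my_mon & this ) { return &this.__mon_d; }
	void ^?{}( my_mon & mutex this ) {}             // mutex dtor completes the trait

	void work( my_mon & mutex this ) {}             // now usable as a mutex parameter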
libcfa/src/concurrency/mutex.cfa
ree23a8d re84ab3d

 		recursion_count--;
 		if( recursion_count == 0 ) {
-			$thread * thrd = pop_head( blocked_threads );
+			thread$ * thrd = pop_head( blocked_threads );
 			owner = thrd;
 			recursion_count = (thrd ? 1 : 0);
libcfa/src/concurrency/mutex.hfa
ree23a8d re84ab3d

 	// List of blocked threads
-	__queue_t(struct $thread) blocked_threads;
+	__queue_t(struct thread$) blocked_threads;

 	// Locked flag
…
 	// List of blocked threads
-	__queue_t(struct $thread) blocked_threads;
+	__queue_t(struct thread$) blocked_threads;

 	// Current thread owning the lock
-	struct $thread * owner;
+	struct thread$ * owner;

 	// Number of recursion level
…
 	// List of blocked threads
-	__queue_t(struct $thread) blocked_threads;
+	__queue_t(struct thread$) blocked_threads;
libcfa/src/concurrency/preemption.cfa
ree23a8d re84ab3d

 // FwdDeclarations : timeout handlers
 static void preempt( processor * this );
-static void timeout( $thread * this );
+static void timeout( thread$ * this );

 // FwdDeclarations : Signal handlers
…
 // reserved for future use
-static void timeout( $thread * this ) {
+static void timeout( thread$ * this ) {
 	unpark( this );
 }
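Note: timeout() leans on the ordering guarantee documented above unpark() in thread.hfa below — unparking a thread that has not yet parked simply makes its next park() return immediately, so the alarm handler cannot race with the sleeper. A sketch, assuming park()/unpark() and the thread keyword are reachable through thread.hfa; Sleeper is illustrative:

	#include <stdio.h>
	#include <thread.hfa>

	thread Sleeper {};
	void main( Sleeper & this ) {
		park();                          // blocks, or returns at once if already unparked
		printf( "woken\n" );
	}

	int main() {
		Sleeper s;                       // ctor starts the thread
		unpark( get_thread( s ) );       // safe in either order w.r.t. the park()
	}                                        // dtor joins s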
libcfa/src/concurrency/ready_queue.cfa
ree23a8d re84ab3d

 #endif

-static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
-static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
-static inline struct $thread * search(struct cluster * cltr);
+static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats));
+static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats));
+static inline struct thread$ * search(struct cluster * cltr);
 static inline [unsigned, bool] idx_from_r(unsigned r, unsigned preferred);
…
 //-----------------------------------------------------------------------
 #if defined(USE_CPU_WORK_STEALING)
-	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
+	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
 		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
…
 	// Pop from the ready queue from a given cluster
-	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
+	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
 		/* paranoid */ verify( lanes.count > 0 );
 		/* paranoid */ verify( kernelTLS().this_processor );
…
 			proc->rdq.target = -1u;
 			if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
-				$thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
+				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
 				proc->rdq.last = target;
 				if(t) return t;
…
 			unsigned last = proc->rdq.last;
 			if(last != -1u && lanes.tscs[last].tv < cutoff && ts(lanes.data[last]) < cutoff) {
-				$thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
+				thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.help));
 				if(t) return t;
 			}
…
 		for(READYQ_SHARD_FACTOR) {
 			unsigned i = start + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
-			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
+			if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
 		}
…
 	}

-	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
+	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
 		processor * const proc = kernelTLS().this_processor;
 		unsigned last = proc->rdq.last;
 		if(last != -1u) {
-			struct $thread * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
+			struct thread$ * t = try_pop(cltr, last __STATS(, __tls_stats()->ready.pop.steal));
 			if(t) return t;
 			proc->rdq.last = -1u;
…
 		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
 	}
-	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
+	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
 		return search(cltr);
 	}
…
 	}

-	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
+	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
 		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
…
 	// Pop from the ready queue from a given cluster
-	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
+	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
 		/* paranoid */ verify( lanes.count > 0 );
 		/* paranoid */ verify( kernelTLS().this_processor );
…
 		// try popping from the 2 picked lists
-		struct $thread * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
+		struct thread$ * thrd = try_pop(cltr, i, j __STATS(, *(locali || localj ? &__tls_stats()->ready.pop.local : &__tls_stats()->ready.pop.help)));
 		if(thrd) {
 			return thrd;
…
 	}

-	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
-	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) {
+	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) { return pop_fast(cltr); }
+	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) {
 		return search(cltr);
 	}
 #endif
 #if defined(USE_WORK_STEALING)
-	__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool push_local) with (cltr->ready_queue) {
+	__attribute__((hot)) void push(struct cluster * cltr, struct thread$ * thrd, bool push_local) with (cltr->ready_queue) {
 		__cfadbg_print_safe(ready_queue, "Kernel : Pushing %p on cluster %p\n", thrd, cltr);
…
 	// Pop from the ready queue from a given cluster
-	__attribute__((hot)) $thread * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
+	__attribute__((hot)) thread$ * pop_fast(struct cluster * cltr) with (cltr->ready_queue) {
 		/* paranoid */ verify( lanes.count > 0 );
 		/* paranoid */ verify( kernelTLS().this_processor );
…
 			const unsigned long long cutoff = proc->rdq.cutoff > bias ? proc->rdq.cutoff - bias : proc->rdq.cutoff;
 			if(lanes.tscs[target].tv < cutoff && ts(lanes.data[target]) < cutoff) {
-				$thread * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
+				thread$ * t = try_pop(cltr, target __STATS(, __tls_stats()->ready.pop.help));
 				if(t) return t;
 			}
…
 		for(READYQ_SHARD_FACTOR) {
 			unsigned i = proc->rdq.id + (proc->rdq.itr++ % READYQ_SHARD_FACTOR);
-			if($thread * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
+			if(thread$ * t = try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.local))) return t;
 		}
 		return 0p;
 	}

-	__attribute__((hot)) struct $thread * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
+	__attribute__((hot)) struct thread$ * pop_slow(struct cluster * cltr) with (cltr->ready_queue) {
 		unsigned i = __tls_rand() % lanes.count;
 		return try_pop(cltr, i __STATS(, __tls_stats()->ready.pop.steal));
 	}

-	__attribute__((hot)) struct $thread * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
+	__attribute__((hot)) struct thread$ * pop_search(struct cluster * cltr) with (cltr->ready_queue) {
 		return search(cltr);
 	}
…
 //-----------------------------------------------------------------------
 // try to pop from a lane given by index w
-static inline struct $thread * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
+static inline struct thread$ * try_pop(struct cluster * cltr, unsigned w __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
 	__STATS( stats.attempt++; )
…
 	// Actually pop the list
-	struct $thread * thrd;
+	struct thread$ * thrd;
 	unsigned long long tsv;
 	[thrd, tsv] = pop(lane);
…
 // try to pop from any lanes making sure you don't miss any threads push
 // before the start of the function
-static inline struct $thread * search(struct cluster * cltr) with (cltr->ready_queue) {
+static inline struct thread$ * search(struct cluster * cltr) with (cltr->ready_queue) {
 	/* paranoid */ verify( lanes.count > 0 );
 	unsigned count = __atomic_load_n( &lanes.count, __ATOMIC_RELAXED );
…
 	for(i; count) {
 		unsigned idx = (offset + i) % count;
-		struct $thread * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
+		struct thread$ * thrd = try_pop(cltr, idx __STATS(, __tls_stats()->ready.pop.search));
 		if(thrd) {
 			return thrd;
…
 //-----------------------------------------------------------------------
 // Given 2 indexes, pick the list with the oldest push an try to pop from it
-static inline struct $thread * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
+static inline struct thread$ * try_pop(struct cluster * cltr, unsigned i, unsigned j __STATS(, __stats_readyQ_pop_t & stats)) with (cltr->ready_queue) {
 	// Pick the bet list
 	int w = i;
…
 	// As long as we can pop from this lane to push the threads somewhere else in the queue
 	while(!is_empty(lanes.data[idx])) {
-		struct $thread * thrd;
+		struct thread$ * thrd;
 		unsigned long long _;
 		[thrd, _] = pop(lanes.data[idx]);
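Note: the two-index try_pop() above is a power-of-two-choices policy — inspect two lanes and take from the one with the older timestamp — which bounds starvation without any global ordering. A standalone model of just that choice (lane count and timestamps fabricated for illustration):

	#include <stdio.h>
	#include <stdlib.h>

	enum { LANES = 8 };
	static unsigned long long lane_ts[LANES];          // timestamp of the oldest element per lane

	static int pick_lane( void ) {
		int i = rand() % LANES, j = rand() % LANES;
		return lane_ts[i] <= lane_ts[j] ? i : j;   // prefer the older (more starved) lane
	}

	int main( void ) {
		for ( int l = 0; l < LANES; l += 1 ) lane_ts[l] = 1000 + rand() % 100;
		printf( "popping from lane %d\n", pick_lane() );
	}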
libcfa/src/concurrency/ready_subqueue.hfa
ree23a8d re84ab3d

 // Intrusives lanes which are used by the relaxed ready queue
 struct __attribute__((aligned(128))) __intrusive_lane_t {
-	struct $thread * prev;
+	struct thread$ * prev;

 	// spin lock protecting the queue
…
 // Get the head pointer (one before the first element) from the anchor
-static inline $thread * mock_head(const __intrusive_lane_t & this) {
-	$thread * rhead = ($thread *)(
-		(uintptr_t)( &this.anchor ) - __builtin_offsetof( $thread, link )
+static inline thread$ * mock_head(const __intrusive_lane_t & this) {
+	thread$ * rhead = (thread$ *)(
+		(uintptr_t)( &this.anchor ) - __builtin_offsetof( thread$, link )
 	);
 	return rhead;
…
 	// We add a boat-load of assertions here because the anchor code is very fragile
-	/* paranoid */ _Static_assert( offsetof( $thread, link ) == offsetof(__intrusive_lane_t, anchor) );
-	/* paranoid */ verify( offsetof( $thread, link ) == offsetof(__intrusive_lane_t, anchor) );
-	/* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( $thread, link )) == (uintptr_t)(&this.anchor) );
+	/* paranoid */ _Static_assert( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
+	/* paranoid */ verify( offsetof( thread$, link ) == offsetof(__intrusive_lane_t, anchor) );
+	/* paranoid */ verify( ((uintptr_t)( mock_head(this) ) + offsetof( thread$, link )) == (uintptr_t)(&this.anchor) );
 	/* paranoid */ verify( &mock_head(this)->link.next == &this.anchor.next );
 	/* paranoid */ verify( &mock_head(this)->link.ts   == &this.anchor.ts   );
…
 // Push a thread onto this lane
 // returns true of lane was empty before push, false otherwise
-static inline void push( __intrusive_lane_t & this, $thread * node ) {
+static inline void push( __intrusive_lane_t & this, thread$ * node ) {
 	/* paranoid */ verify( this.lock );
 	/* paranoid */ verify( node->link.next == 0p );
…
 // returns popped
 // returns true of lane was empty before push, false otherwise
-static inline [* $thread, unsigned long long] pop( __intrusive_lane_t & this ) {
+static inline [* thread$, unsigned long long] pop( __intrusive_lane_t & this ) {
 	/* paranoid */ verify( this.lock );
 	/* paranoid */ verify( this.anchor.next != 0p );
…
 	// Get the relevant nodes locally
 	unsigned long long ts = this.anchor.ts;
-	$thread * node = this.anchor.next;
+	thread$ * node = this.anchor.next;
 	this.anchor.next = node->link.next;
 	this.anchor.ts   = node->link.ts;
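Note: mock_head() is a container_of computed in reverse — the lane's anchor masquerades as the link field of a fabricated head node, so push/pop never branch on the empty case. The same trick reduced to plain C (all type names illustrative; like the original, it relies on the link being the only member ever touched through the fake node):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct link { struct link * next; };
	struct node { int value; struct link link; };
	struct list { struct link anchor; };

	// fabricate a node * whose ->link aliases the anchor
	static inline struct node * mock_head( struct list * l ) {
		return (struct node *)((uintptr_t)&l->anchor - offsetof(struct node, link));
	}

	int main( void ) {
		struct list l = { { NULL } };
		struct node n = { 42, { NULL } };
		mock_head( &l )->link.next = &n.link;      // head insert: same code path as an interior insert
		struct node * first = (struct node *)((uintptr_t)l.anchor.next - offsetof(struct node, link));
		printf( "%d\n", first->value );            // prints 42
	}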
libcfa/src/concurrency/thread.cfa
ree23a8d re84ab3d

 //-----------------------------------------------------------------------------
 // Thread ctors and dtors
-void ?{}($thread & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
+void ?{}(thread$ & this, const char * const name, cluster & cl, void * storage, size_t storageSize ) with( this ) {
 	context{ 0p, 0p };
 	self_cor{ name, storage, storageSize };
…
 }

-void ^?{}($thread & this) with( this ) {
+void ^?{}(thread$& this) with( this ) {
 	#if defined( __CFA_WITH_VERIFY__ )
 		canary = 0xDEADDEADDEADDEADp;
…
 void ?{}( thread_dtor_guard_t & this,
 		T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
-	$monitor * m = get_monitor(thrd);
-	$thread * desc = get_thread(thrd);
+	monitor$ * m = get_monitor(thrd);
+	thread$ * desc = get_thread(thrd);

 	// Setup the monitor guard
…
 forall( T & | is_thread(T) )
 void __thrd_start( T & this, void (*main_p)(T &) ) {
-	$thread * this_thrd = get_thread(this);
+	thread$ * this_thrd = get_thread(this);

 	disable_interrupts();
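Note: the ctor/dtor pair above gives CFA threads their lexical lifetime — construction (through __thrd_start) schedules main(), destruction (through thread_dtor_guard_t) joins. A usage sketch; the hello type is illustrative:

	#include <stdio.h>
	#include <thread.hfa>

	thread hello {};
	void main( hello & this ) {
		printf( "hello from a CFA thread\n" );
	}

	int main() {
		hello h;    // ?{} runs, then __thrd_start() schedules main( h )
	}                   // ^?{} joins h before its storage is released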
libcfa/src/concurrency/thread.hfa
ree23a8d re84ab3d

 	void ^?{}(T& mutex this);
 	void main(T& this);
-	$thread * get_thread(T& this);
+	thread$ * get_thread(T& this);
 };
…
 // Inline getters for threads/coroutines/monitors
 forall( T & | is_thread(T) )
-static inline $coroutine * get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }
+static inline coroutine$ * get_coroutine(T & this) __attribute__((const)) { return &get_thread(this)->self_cor; }

 forall( T & | is_thread(T) )
-static inline $monitor * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }
+static inline monitor$ * get_monitor (T & this) __attribute__((const)) { return &get_thread(this)->self_mon; }

-static inline $coroutine * get_coroutine($thread * this) __attribute__((const)) { return &this->self_cor; }
-static inline $monitor * get_monitor ($thread * this) __attribute__((const)) { return &this->self_mon; }
+static inline coroutine$ * get_coroutine(thread$ * this) __attribute__((const)) { return &this->self_cor; }
+static inline monitor$ * get_monitor (thread$ * this) __attribute__((const)) { return &this->self_mon; }

 //-----------------------------------------------------------------------------
…
 //-----------------------------------------------------------------------------
 // Ctors and dtors
-void ?{}($thread & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
-void ^?{}($thread & this);
+void ?{}(thread$ & this, const char * const name, struct cluster & cl, void * storage, size_t storageSize );
+void ^?{}(thread$ & this);

-static inline void ?{}($thread & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
-static inline void ?{}($thread & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
-static inline void ?{}($thread & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
-static inline void ?{}($thread & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }
-static inline void ?{}($thread & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }
-static inline void ?{}($thread & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
-static inline void ?{}($thread & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }
-static inline void ?{}($thread & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }
-static inline void ?{}($thread & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }
+static inline void ?{}(thread$ & this) { this{ "Anonymous Thread", *mainCluster, 0p, 65000 }; }
+static inline void ?{}(thread$ & this, size_t stackSize ) { this{ "Anonymous Thread", *mainCluster, 0p, stackSize }; }
+static inline void ?{}(thread$ & this, void * storage, size_t storageSize ) { this{ "Anonymous Thread", *mainCluster, storage, storageSize }; }
+static inline void ?{}(thread$ & this, struct cluster & cl ) { this{ "Anonymous Thread", cl, 0p, 65000 }; }
+static inline void ?{}(thread$ & this, struct cluster & cl, size_t stackSize ) { this{ "Anonymous Thread", cl, 0p, stackSize }; }
+static inline void ?{}(thread$ & this, struct cluster & cl, void * storage, size_t storageSize ) { this{ "Anonymous Thread", cl, storage, storageSize }; }
+static inline void ?{}(thread$ & this, const char * const name) { this{ name, *mainCluster, 0p, 65000 }; }
+static inline void ?{}(thread$ & this, const char * const name, struct cluster & cl ) { this{ name, cl, 0p, 65000 }; }
+static inline void ?{}(thread$ & this, const char * const name, struct cluster & cl, size_t stackSize ) { this{ name, cl, 0p, stackSize }; }

 struct thread_dtor_guard_t {
…
 // Unpark a thread, if the thread is already blocked, schedule it
 // if the thread is not yet block, signal that it should rerun immediately
-void unpark( $thread * this );
+void unpark( thread$ * this );

 forall( T & | is_thread(T) )
src/Concurrency/Keywords.cc
ree23a8d re84ab3d

 	//    int data;                             int data;
 	//    a_struct_t more_data;                 a_struct_t more_data;
-	//                               =>         $thread __thrd_d;
+	//                               =>         thread$ __thrd_d;
 	// };                                   };
-	//      static inline $thread * get_thread( MyThread * this ) { return &this->__thrd_d; }
+	//      static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }
 	//
 	class ThreadKeyword final : public ConcurrentSueKeyword {
…
 		ThreadKeyword() : ConcurrentSueKeyword(
-			"$thread",
+			"thread$",
 			"__thrd",
 			"get_thread",
…
 	//    int data;                             int data;
 	//    a_struct_t more_data;                 a_struct_t more_data;
-	//                               =>         $coroutine __cor_d;
+	//                               =>         coroutine$ __cor_d;
 	// };                                   };
-	//      static inline $coroutine * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
+	//      static inline coroutine$ * get_coroutine( MyCoroutine * this ) { return &this->__cor_d; }
 	//
 	class CoroutineKeyword final : public ConcurrentSueKeyword {
…
 		CoroutineKeyword() : ConcurrentSueKeyword(
-			"$coroutine",
+			"coroutine$",
 			"__cor",
 			"get_coroutine",
…
 	//    int data;                             int data;
 	//    a_struct_t more_data;                 a_struct_t more_data;
-	//                               =>         $monitor __mon_d;
+	//                               =>         monitor$ __mon_d;
 	// };                                   };
-	//      static inline $monitor * get_coroutine( MyMonitor * this ) { return &this->__cor_d; }
+	//      static inline monitor$ * get_coroutine( MyMonitor * this ) { return &this->__cor_d; }
 	//
 	class MonitorKeyword final : public ConcurrentSueKeyword {
…
 		MonitorKeyword() : ConcurrentSueKeyword(
-			"$monitor",
+			"monitor$",
 			"__mon",
 			"get_monitor",
…
 		GeneratorKeyword() : ConcurrentSueKeyword(
-			"$generator",
+			"generator$",
 			"__generator_state",
 			"get_generator",
-			"Unable to find builtin type $generator\n",
+			"Unable to find builtin type generator$\n",
 			"",
 			true,
…
 	//Handles mutex routines definitions :
 	// void foo( A * mutex a, B * mutex b, int i ) {    void foo( A * a, B * b, int i ) {
-	//                                          =>          $monitor * __monitors[] = { get_monitor(a), get_monitor(b) };
+	//                                          =>          monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 	//                                                      monitor_guard_t __guard = { __monitors, 2 };
 	//    /*Some code*/                                     /*Some code*/
…
 	//Handles mutex routines definitions :
 	// void foo( A * mutex a, B * mutex b, int i ) {    void foo( A * a, B * b, int i ) {
-	//                                          =>          $monitor * __monitors[] = { get_monitor(a), get_monitor(b) };
+	//                                          =>          monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 	//                                                      monitor_guard_t __guard = { __monitors, 2 };
 	//    /*Some code*/                                     /*Some code*/
…
 	Expression * ConcurrentSueKeyword::postmutate( KeywordCastExpr * cast ) {
 		if ( cast_target == cast->target ) {
-			// convert (thread &)t to ($thread &)*get_thread(t), etc.
+			// convert (thread &)t to (thread$ &)*get_thread(t), etc.
 			if( !type_decl ) SemanticError( cast, context_error );
 			if( !dtor_decl ) SemanticError( cast, context_error );
…
 	void MutexKeyword::postvisit(StructDecl* decl) {

-		if( decl->name == "$monitor" && decl->body ) {
+		if( decl->name == "monitor$" && decl->body ) {
 			assert( !monitor_decl );
 			monitor_decl = decl;
…
 		);

-		//$monitor * __monitors[] = { get_monitor(a), get_monitor(b) };
+		//monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 		body->push_front( new DeclStmt( monitors ) );
 	}
…
 		);

-		//$monitor * __monitors[] = { get_monitor(a), get_monitor(b) };
+		//monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
 		body->push_front( new DeclStmt( monitors) );
 	}
…
 	//=============================================================================================
 	void ThreadStarter::previsit( StructDecl * decl ) {
-		if( decl->name == "$thread" && decl->body ) {
+		if( decl->name == "thread$" && decl->body ) {
 			assert( !thread_decl );
 			thread_decl = decl;
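Note: putting the pieces together, the keyword pass rewrites user declarations into the plain structs the runtime sees. Spelled out from this file's own comments (MyThread, A, B, and foo are the file's placeholders), with the new trailing-$ names:

	// thread MyThread {                   struct MyThread {
	//     int        data;                    int        data;
	//     a_struct_t more_data;    =>         a_struct_t more_data;
	// };                                      thread$    __thrd_d;
	//                                     };
	//                                     static inline thread$ * get_thread( MyThread * this ) { return &this->__thrd_d; }
	//
	// void foo( A * mutex a, B * mutex b, int i ) {    void foo( A * a, B * b, int i ) {
	//     /*Some code*/                          =>        monitor$ * __monitors[] = { get_monitor(a), get_monitor(b) };
	// }                                                    monitor_guard_t __guard = { __monitors, 2 };
	//                                                      /*Some code*/
	//                                                  }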