Changeset 74ec742 for libcfa/src/concurrency
- Timestamp: May 20, 2022, 10:36:45 AM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 25fa20a
- Parents: 29d8c02 (diff), 7831e8fb (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.

- Location: libcfa/src/concurrency
- Files: 21 edited
  - alarm.cfa (modified) (1 diff)
  - clib/cfathread.cfa (modified) (1 diff)
  - coroutine.cfa (modified) (10 diffs)
  - coroutine.hfa (modified) (1 diff)
  - exception.cfa (modified) (1 diff)
  - invoke.c (modified) (4 diffs)
  - io.cfa (modified) (3 diffs)
  - io/call.cfa.in (modified) (1 diff)
  - io/setup.cfa (modified) (2 diffs)
  - kernel.cfa (modified) (8 diffs)
  - kernel.hfa (modified) (1 diff)
  - kernel/cluster.cfa (modified) (1 diff)
  - kernel/private.hfa (modified) (1 diff)
  - kernel/startup.cfa (modified) (8 diffs)
  - locks.cfa (modified) (9 diffs)
  - locks.hfa (modified) (5 diffs)
  - monitor.cfa (modified) (18 diffs)
  - monitor.hfa (modified) (1 diff)
  - preemption.cfa (modified) (6 diffs)
  - ready_subqueue.hfa (modified) (2 diffs)
  - thread.cfa (modified) (1 diff)
libcfa/src/concurrency/alarm.cfa
--- r29d8c02
+++ r74ec742
@@ -141,5 +141,5 @@
 //=============================================================================================
 
-void sleep( Duration duration ) {
+void sleep( Duration duration ) libcfa_public {
 	alarm_node_t node = { active_thread(), duration, 0`s };
 
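Most hunks in this merge do one of two things: annotate functions that belong to libcfa's public interface with libcfa_public, or mark internal helpers static so they are no longer exported. The changeset never shows the definition of libcfa_public itself; judging from the visibility("default") attributes and #pragma GCC visibility push(default) directives that do appear in the hunks below, a plausible definition (an assumption, not quoted from the source) is:

	// Assumed definition: libcfa_public plausibly expands to a GCC visibility
	// attribute, keeping the symbol exported even when the library is compiled
	// with -fvisibility=hidden.
	#define libcfa_public __attribute__((visibility("default")))

Under a hidden-by-default build, only the annotated symbols land in the shared library's dynamic symbol table, which shrinks the exported surface and reduces the chance of accidental symbol interposition.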
libcfa/src/concurrency/clib/cfathread.cfa
--- r29d8c02
+++ r74ec742
@@ -326,17 +326,19 @@
 }
 
+#pragma GCC visibility push(default)
+
 //================================================================================
 // Main Api
 extern "C" {
-	int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
+	int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
 		*cl = new();
 		return 0;
 	}
 
-	cfathread_cluster_t cfathread_cluster_self(void) {
+	cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
 		return active_cluster();
 	}
 
-	int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
+	int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
 	#if !defined(__CFA_NO_STATISTICS__)
 		print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
libcfa/src/concurrency/coroutine.cfa
--- r29d8c02
+++ r74ec742
@@ -48,5 +48,5 @@
 //-----------------------------------------------------------------------------
 forall(T &)
-void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
+void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
 	dst->virtual_table = src->virtual_table;
 	dst->the_coroutine = src->the_coroutine;
@@ -55,5 +55,5 @@
 
 forall(T &)
-const char * msg(CoroutineCancelled(T) *) {
+const char * msg(CoroutineCancelled(T) *) libcfa_public {
 	return "CoroutineCancelled(...)";
 }
@@ -62,5 +62,5 @@
 forall(T & | is_coroutine(T))
 void __cfaehm_cancelled_coroutine(
-		T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
+		T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) libcfa_public {
 	verify( desc->cancellation );
 	desc->state = Cancelled;
@@ -89,5 +89,5 @@
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
-void __stack_clean  ( __stack_info_t * this );
+static void __stack_clean  ( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
@@ -114,5 +114,5 @@
 }
 
-void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
+void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
 	(this.context){0p, 0p};
 	(this.stack){storage, storageSize};
@@ -124,5 +124,5 @@
 }
 
-void ^?{}(coroutine$& this) {
+void ^?{}(coroutine$& this) libcfa_public {
 	if(this.state != Halted && this.state != Start && this.state != Primed) {
 		coroutine$ * src = active_coroutine();
@@ -147,5 +147,5 @@
 // Not inline since only ever called once per coroutine
 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
-void prime(T& cor) {
+void prime(T& cor) libcfa_public {
 	coroutine$* this = get_coroutine(cor);
 	assert(this->state == Start);
@@ -155,5 +155,5 @@
 }
 
-[void *, size_t] __stack_alloc( size_t storageSize ) {
+static [void *, size_t] __stack_alloc( size_t storageSize ) {
 	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
 	assert(__page_size != 0l);
@@ -193,5 +193,5 @@
 }
 
-void __stack_clean  ( __stack_info_t * this ) {
+static void __stack_clean  ( __stack_info_t * this ) {
 	void * storage = this->storage->limit;
 
@@ -215,5 +215,5 @@
 }
 
-void __stack_prepare( __stack_info_t * this, size_t create_size ) {
+void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
 	const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
 	bool userStack;
libcfa/src/concurrency/coroutine.hfa
--- r29d8c02
+++ r74ec742
@@ -113,6 +113,4 @@
 
 extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
-extern void __stack_clean  ( __stack_info_t * this );
-
 
 // Suspend implementation inlined for performance
libcfa/src/concurrency/exception.cfa
--- r29d8c02
+++ r74ec742
@@ -64,9 +64,9 @@
 extern "C" {
 
-struct exception_context_t * this_exception_context(void) {
+struct exception_context_t * this_exception_context(void) libcfa_public {
 	return &__get_stack( active_coroutine() )->exception_context;
 }
 
-_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
+_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
 	_Unwind_Stop_Fn stop_func;
 	void * stop_param;
libcfa/src/concurrency/invoke.c
--- r29d8c02
+++ r74ec742
@@ -36,5 +36,5 @@
 extern void enable_interrupts( _Bool poll );
 
-void __cfactx_invoke_coroutine(
+libcfa_public void __cfactx_invoke_coroutine(
 	void (*main)(void *),
 	void *this
@@ -70,5 +70,5 @@
 }
 
-void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
+libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
 void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
 	_Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
@@ -77,5 +77,5 @@
 }
 
-void __cfactx_invoke_thread(
+libcfa_public void __cfactx_invoke_thread(
 	void (*main)(void *),
 	void *this
@@ -98,5 +98,5 @@
 }
 
-void __cfactx_start(
+libcfa_public void __cfactx_start(
 	void (*main)(void *),
 	struct coroutine$ * cor,
libcfa/src/concurrency/io.cfa
--- r29d8c02
+++ r74ec742
@@ -244,5 +244,5 @@
 
 			remote = true;
-			__STATS__( false, io.calls.helped++; )
+			__STATS__( true, io.calls.helped++; )
 		}
 		proc->io.target = MAX;
@@ -340,5 +340,5 @@
 	// for convenience, return both the index and the pointer to the sqe
 	// sqe == &sqes[idx]
-	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
+	struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
 		// __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
 
@@ -419,5 +419,5 @@
 	}
 
-	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
+	void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
		// __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
 
libcfa/src/concurrency/io/call.cfa.in
--- r29d8c02
+++ r74ec742
@@ -139,3 +139,4 @@
 // I/O Interface
 //=============================================================================================
+#pragma GCC visibility push(default)
 """
libcfa/src/concurrency/io/setup.cfa
--- r29d8c02
+++ r74ec742
@@ -26,5 +26,5 @@
 
 #if !defined(CFA_HAVE_LINUX_IO_URING_H)
-	void ?{}(io_context_params & this) {}
+	void ?{}(io_context_params & this) libcfa_public {}
 
 	void ?{}($io_context & this, struct cluster & cl) {}
@@ -66,5 +66,5 @@
 	#pragma GCC diagnostic pop
 
-	void ?{}(io_context_params & this) {
+	void ?{}(io_context_params & this) libcfa_public {
 		this.num_entries = 256;
 	}
libcfa/src/concurrency/kernel.cfa
--- r29d8c02
+++ r74ec742
@@ -389,5 +389,5 @@
 
 // KERNEL_ONLY
-void returnToKernel() {
+static void returnToKernel() {
 	/* paranoid */ verify( ! __preemption_enabled() );
 	coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
@@ -547,5 +547,5 @@
 }
 
-void unpark( thread$ * thrd, unpark_hint hint ) {
+void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
 	if( !thrd ) return;
 
@@ -558,5 +558,5 @@
 }
 
-void park( void ) {
+void park( void ) libcfa_public {
 	__disable_interrupts_checked();
 	/* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
@@ -601,5 +601,5 @@
 
 // KERNEL ONLY
-bool force_yield( __Preemption_Reason reason ) {
+bool force_yield( __Preemption_Reason reason ) libcfa_public {
 	__disable_interrupts_checked();
 	thread$ * thrd = kernelTLS().this_thread;
@@ -849,5 +849,5 @@
 //-----------------------------------------------------------------------------
 // Debug
-bool threading_enabled(void) __attribute__((const)) {
+bool threading_enabled(void) __attribute__((const)) libcfa_public {
 	return true;
 }
@@ -856,5 +856,5 @@
 // Statistics
 #if !defined(__CFA_NO_STATISTICS__)
-	void print_halts( processor & this ) {
+	void print_halts( processor & this ) libcfa_public {
 		this.print_halts = true;
 	}
@@ -873,5 +873,5 @@
 	}
 
-	void crawl_cluster_stats( cluster & this ) {
+	static void crawl_cluster_stats( cluster & this ) {
 		// Stop the world, otherwise stats could get really messed-up
 		// this doesn't solve all problems but does solve many
@@ -889,5 +889,5 @@
 
 
-	void print_stats_now( cluster & this, int flags ) {
+	void print_stats_now( cluster & this, int flags ) libcfa_public {
 		crawl_cluster_stats( this );
 		__print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
libcfa/src/concurrency/kernel.hfa
--- r29d8c02
+++ r74ec742
@@ -49,5 +49,7 @@
 
 // Coroutine used py processors for the 2-step context switch
-coroutine processorCtx_t {
+
+struct processorCtx_t {
+	struct coroutine$ self;
 	struct processor * proc;
 };
libcfa/src/concurrency/kernel/cluster.cfa
--- r29d8c02
+++ r74ec742
@@ -49,5 +49,5 @@
 
 // returns the maximum number of processors the RWLock support
-__attribute__((weak)) unsigned __max_processors() {
+__attribute__((weak)) unsigned __max_processors() libcfa_public {
 	const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
 	if(!max_cores_s) {
libcfa/src/concurrency/kernel/private.hfa
--- r29d8c02
+++ r74ec742
@@ -109,5 +109,6 @@
 //-----------------------------------------------------------------------------
 // Processor
-void main(processorCtx_t *);
+void main(processorCtx_t &);
+static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
 
 void * __create_pthread( pthread_t *, void * (*)(void *), void * );
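The kernel.hfa and kernel/private.hfa hunks convert processorCtx_t from a coroutine type into a plain struct that embeds a struct coroutine$ self field and hand-writes its own get_coroutine. In CFA the coroutine keyword is essentially sugar for this pattern; a rough paraphrase of the trait a type must satisfy to be driven as a coroutine (an assumption: simplified from coroutine.hfa, with the exception-handling assertions elided) is:

	// Rough paraphrase (assumption, simplified from coroutine.hfa) of what the
	// runtime requires of a coroutine-like type.
	trait is_coroutine( T & ) {
		void main( T & this );                  // the coroutine body
		coroutine$ * get_coroutine( T & this ); // access to the runtime descriptor
	};

Supplying the embedded descriptor and get_coroutine manually gives the processor context the same interface while avoiding the extra code the coroutine keyword would otherwise generate.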
libcfa/src/concurrency/kernel/startup.cfa
--- r29d8c02
+++ r74ec742
@@ -120,5 +120,5 @@
 #endif
 
-cluster * mainCluster;
+cluster * mainCluster libcfa_public;
 processor * mainProcessor;
 thread$ * mainThread;
@@ -169,5 +169,5 @@
 };
 
-void ?{}( current_stack_info_t & this ) {
+static void ?{}( current_stack_info_t & this ) {
 	__stack_context_t ctx;
 	CtxGet( ctx );
@@ -209,6 +209,6 @@
 // Construct the processor context of the main processor
 void ?{}(processorCtx_t & this, processor * proc) {
-	(this.__cor){ "Processor" };
-	this.__cor.starter = 0p;
+	(this.self){ "Processor" };
+	this.self.starter = 0p;
 	this.proc = proc;
 }
@@ -526,5 +526,5 @@
 // Construct the processor context of non-main processors
 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
-	(this.__cor){ info };
+	(this.self){ info };
 	this.proc = proc;
 }
@@ -578,5 +578,5 @@
 }
 
-void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
+void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
 	( this.terminated ){};
 	( this.runner ){};
@@ -591,10 +591,10 @@
 }
 
-void ?{}(processor & this, const char name[], cluster & _cltr) {
+void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
 	(this){name, _cltr, 0p};
 }
 
 extern size_t __page_size;
-void ^?{}(processor & this) with( this ){
+void ^?{}(processor & this) libcfa_public with( this ){
 	/* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
 	__cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
@@ -623,5 +623,5 @@
 }
 
-void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
+void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
 	this.name = name;
 	this.preemption_rate = preemption_rate;
@@ -658,5 +658,5 @@
 }
 
-void ^?{}(cluster & this) {
+void ^?{}(cluster & this) libcfa_public {
 	destroy(this.io.arbiter);
 
libcfa/src/concurrency/locks.cfa
--- r29d8c02
+++ r74ec742
@@ -24,4 +24,6 @@
 #include <stdlib.hfa>
 
+#pragma GCC visibility push(default)
+
 //-----------------------------------------------------------------------------
 // info_thread
@@ -116,5 +118,5 @@
 	}
 
-	void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
+	static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
 		thread$ * t = &try_pop_front( blocked_threads );
 		owner = t;
@@ -192,5 +194,5 @@
 	void ^?{}( alarm_node_wrap(L) & this ) { }
 
-	void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
+	static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
 		// This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
 		lock( cond->lock __cfaabi_dbg_ctx2 );
@@ -216,11 +218,13 @@
 
 	// this casts the alarm node to our wrapped type since we used type erasure
-	void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
+	static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
 }
 
 //-----------------------------------------------------------------------------
-// condition variable
+// Synchronization Locks
 forall(L & | is_blocking_lock(L)) {
 
+	//-----------------------------------------------------------------------------
+	// condition variable
 	void ?{}( condition_variable(L) & this ){
 		this.lock{};
@@ -231,5 +235,5 @@
 	void ^?{}( condition_variable(L) & this ){ }
 
-	void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
+	static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
 		if(&popped != 0p) {
 			popped.signalled = true;
@@ -276,5 +280,5 @@
 	int counter( condition_variable(L) & this ) with(this) { return count; }
 
-	size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
+	static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
 		// add info_thread to waiting queue
 		insert_last( blocked_threads, *i );
@@ -289,5 +293,5 @@
 
 	// helper for wait()'s' with no timeout
-	void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
+	static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		size_t recursion_count = queue_and_get_recursion(this, &i);
@@ -306,5 +310,5 @@
 
 	// helper for wait()'s' with a timeout
-	void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
+	static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
 		lock( lock __cfaabi_dbg_ctx2 );
 		size_t recursion_count = queue_and_get_recursion(this, &info);
@@ -337,3 +341,51 @@
 	bool wait( condition_variable(L) & this, L & l, Duration duration ) with(this) { WAIT_TIME( 0   , &l , duration ) }
 	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration ) with(this) { WAIT_TIME( info, &l , duration ) }
+
+	//-----------------------------------------------------------------------------
+	// fast_cond_var
+	void  ?{}( fast_cond_var(L) & this ){
+		this.blocked_threads{};
+		#ifdef __CFA_DEBUG__
+		this.lock_used = 0p;
+		#endif
+	}
+	void ^?{}( fast_cond_var(L) & this ){ }
+
+	bool notify_one( fast_cond_var(L) & this ) with(this) {
+		bool ret = ! blocked_threads`isEmpty;
+		if ( ret ) {
+			info_thread(L) & popped = try_pop_front( blocked_threads );
+			on_notify(*popped.lock, popped.t);
+		}
+		return ret;
+	}
+	bool notify_all( fast_cond_var(L) & this ) with(this) {
+		bool ret = ! blocked_threads`isEmpty;
+		while( ! blocked_threads`isEmpty ) {
+			info_thread(L) & popped = try_pop_front( blocked_threads );
+			on_notify(*popped.lock, popped.t);
+		}
+		return ret;
+	}
+
+	uintptr_t front( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty ? NULL : blocked_threads`first.info; }
+	bool empty ( fast_cond_var(L) & this ) with(this) { return blocked_threads`isEmpty; }
+
+	void wait( fast_cond_var(L) & this, L & l ) {
+		wait( this, l, 0 );
+	}
+
+	void wait( fast_cond_var(L) & this, L & l, uintptr_t info ) with(this) {
+		// brand cond lock with lock
+		#ifdef __CFA_DEBUG__
+		if ( lock_used == 0p ) lock_used = &l;
+		else { assert(lock_used == &l); }
+		#endif
+		info_thread( L ) i = { active_thread(), info, &l };
+		insert_last( blocked_threads, i );
+		size_t recursion_count = on_wait( *i.lock );
+		park( );
+		on_wakeup(*i.lock, recursion_count);
+	}
+
 }
libcfa/src/concurrency/locks.hfa
--- r29d8c02
+++ r74ec742
@@ -73,4 +73,6 @@
 static inline void on_notify( owner_lock & this, struct thread$ * t ) { on_notify( (blocking_lock &)this, t ); }
 
+//-----------------------------------------------------------------------------
+// MCS Lock
 struct mcs_node {
 	mcs_node * volatile next;
@@ -98,4 +100,6 @@
 }
 
+//-----------------------------------------------------------------------------
+// Linear backoff Spinlock
 struct linear_backoff_then_block_lock {
 	// Spin lock used for mutual exclusion
@@ -199,4 +203,56 @@
 
 //-----------------------------------------------------------------------------
+// Fast Block Lock
+
+// High efficiency minimal blocking lock
+// - No reacquire for cond var
+// - No recursive acquisition
+// - No ownership
+struct fast_block_lock {
+	// Spin lock used for mutual exclusion
+	__spinlock_t lock;
+
+	// List of blocked threads
+	dlist( thread$ ) blocked_threads;
+
+	bool held:1;
+};
+
+static inline void ?{}( fast_block_lock & this ) with(this) {
+	lock{};
+	blocked_threads{};
+	held = false;
+}
+static inline void ^?{}( fast_block_lock & this ) {}
+static inline void ?{}( fast_block_lock & this, fast_block_lock this2 ) = void;
+static inline void ?=?( fast_block_lock & this, fast_block_lock this2 ) = void;
+
+// if this is called recursively IT WILL DEADLOCK!!!!!
+static inline void lock(fast_block_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	if (held) {
+		insert_last( blocked_threads, *active_thread() );
+		unlock( lock );
+		park( );
+		return;
+	}
+	held = true;
+	unlock( lock );
+}
+
+static inline void unlock(fast_block_lock & this) with(this) {
+	lock( lock __cfaabi_dbg_ctx2 );
+	/* paranoid */ verifyf( held != false, "Attempt to release lock %p that isn't held", &this );
+	thread$ * t = &try_pop_front( blocked_threads );
+	held = ( t ? true : false );
+	unpark( t );
+	unlock( lock );
+}
+
+static inline void on_notify(fast_block_lock & this, struct thread$ * t ) { unpark(t); }
+static inline size_t on_wait(fast_block_lock & this) { unlock(this); return 0; }
+static inline void on_wakeup(fast_block_lock & this, size_t recursion ) { }
+
+//-----------------------------------------------------------------------------
 // is_blocking_lock
 trait is_blocking_lock(L & | sized(L)) {
@@ -226,4 +282,14 @@
 // Synchronization Locks
 forall(L & | is_blocking_lock(L)) {
+
+	//-----------------------------------------------------------------------------
+	// condition_variable
+
+	// The multi-tool condition variable
+	// - can pass timeouts to wait for either a signal or timeout
+	// - can wait without passing a lock
+	// - can have waiters reacquire different locks while waiting on the same cond var
+	// - has shadow queue
+	// - can be signalled outside of critical sections with no locks held
 	struct condition_variable {
 		// Spin lock used for mutual exclusion
@@ -258,3 +324,33 @@
 	bool wait( condition_variable(L) & this, L & l, Duration duration );
 	bool wait( condition_variable(L) & this, L & l, uintptr_t info, Duration duration );
-}
+
+	//-----------------------------------------------------------------------------
+	// fast_cond_var
+
+	// The trimmed and slim condition variable
+	// - no internal lock so you must hold a lock while using this cond var
+	// - signalling without holding branded lock is UNSAFE!
+	// - only allows usage of one lock, cond var is branded after usage
+	struct fast_cond_var {
+		// List of blocked threads
+		dlist( info_thread(L) ) blocked_threads;
+
+		#ifdef __CFA_DEBUG__
+		L * lock_used;
+		#endif
+	};
+
+
+	void  ?{}( fast_cond_var(L) & this );
+	void ^?{}( fast_cond_var(L) & this );
+
+	bool notify_one( fast_cond_var(L) & this );
+	bool notify_all( fast_cond_var(L) & this );
+
+	uintptr_t front( fast_cond_var(L) & this );
+
+	bool empty ( fast_cond_var(L) & this );
+
+	void wait( fast_cond_var(L) & this, L & l );
+	void wait( fast_cond_var(L) & this, L & l, uintptr_t info );
+}
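Taken together, the locks.cfa and locks.hfa hunks introduce a deliberately stripped-down pair: fast_block_lock (no ownership, no recursion, and it deadlocks if acquired recursively) and fast_cond_var (no internal lock; it is branded by the first lock used with it, and signalling without holding that lock is unsafe). A minimal usage sketch that follows those rules (hypothetical caller code, not part of the changeset; work_available(), take_work(), and publish_work() are invented placeholders):

	#include <locks.hfa>

	fast_block_lock mtx;
	fast_cond_var( fast_block_lock ) cond;

	void consumer() {
		lock( mtx );
		while ( ! work_available() ) {
			// wait() releases mtx via on_wait() and parks; fast_block_lock's
			// on_wakeup() is a no-op ("No reacquire for cond var"), so the
			// waiter must reacquire the lock explicitly before re-checking.
			wait( cond, mtx );
			lock( mtx );
		}
		take_work();
		unlock( mtx );
	}

	void producer() {
		lock( mtx );
		publish_work();
		notify_one( cond ); // safe: the branded lock is held
		unlock( mtx );
	}

The contrast with the full condition_variable is the point of the design: condition_variable carries its own spinlock and shadow queue so it can be signalled with no locks held, while fast_cond_var pushes that responsibility onto the caller in exchange for a shorter wait/notify path.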
libcfa/src/concurrency/monitor.cfa
--- r29d8c02
+++ r74ec742
@@ -44,4 +44,8 @@
 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
 
+static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+static inline void ?{}(__condition_criterion_t & this );
+static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+
 static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
@@ -243,5 +247,5 @@
 
 // Leave single monitor
-void __leave( monitor$ * this ) {
+static void __leave( monitor$ * this ) {
 	// Lock the monitor spinlock
 	lock( this->lock __cfaabi_dbg_ctx2 );
@@ -278,5 +282,5 @@
 
 // Leave single monitor for the last time
-void __dtor_leave( monitor$ * this, bool join ) {
+static void __dtor_leave( monitor$ * this, bool join ) {
 	__cfaabi_dbg_debug_do(
 		if( active_thread() != this->owner ) {
@@ -344,4 +348,4 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
 	thread$ * thrd = active_thread();
@@ -369,11 +373,11 @@
 }
 
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
 	this{ m, count, 0p };
 }
 
 
 // Dtor for monitor guard
-void ^?{}( monitor_guard_t & this ) {
+void ^?{}( monitor_guard_t & this ) libcfa_public {
 	// __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
 
@@ -389,5 +393,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
+void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
 	// optimization
 	thread$ * thrd = active_thread();
@@ -409,5 +413,5 @@
 
 // Dtor for monitor guard
-void ^?{}( monitor_dtor_guard_t & this ) {
+void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
 	// Leave the monitors in order
 	__dtor_leave( this.m, this.join );
@@ -419,5 +423,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
 	this.waiting_thread = waiting_thread;
 	this.count = count;
@@ -426,5 +430,5 @@
 }
 
-void ?{}(__condition_criterion_t & this ) with( this ) {
+static void ?{}(__condition_criterion_t & this ) with( this ) {
 	ready  = false;
 	target = 0p;
@@ -433,5 +437,5 @@
 }
 
-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
+static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
 	this.ready  = false;
 	this.target = target;
@@ -442,4 +446,4 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition & this, uintptr_t user_info = 0 ) {
+void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
 	brand_condition( this );
@@ -496,4 +500,4 @@
 }
 
-bool signal( condition & this ) {
+bool signal( condition & this ) libcfa_public {
 	if( is_empty( this ) ) { return false; }
@@ -538,4 +542,4 @@
 }
 
-bool signal_block( condition & this ) {
+bool signal_block( condition & this ) libcfa_public {
 	if( !this.blocked.head ) { return false; }
@@ -586,5 +590,5 @@
 
 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition & this ) {
+uintptr_t front( condition & this ) libcfa_public {
 	verifyf( !is_empty(this),
 		"Attempt to access user data on an empty condition.\n"
@@ -608,5 +612,5 @@
 // setup mask
 // block
-void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
+void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
 	// This statment doesn't have a contiguous list of monitors...
 	// Create one!
@@ -994,4 +998,4 @@
 // Can't be accepted since a mutex stmt is effectively an anonymous routine
 // Thus we do not need a monitor group
-void lock( monitor$ * this ) {
+void lock( monitor$ * this ) libcfa_public {
 	thread$ * thrd = active_thread();
@@ -1046,5 +1050,5 @@
 // Leave routine for mutex stmt
 // Is just a wrapper around __leave for the is_lock trait to see
-void unlock( monitor$ * this ) { __leave( this ); }
+void unlock( monitor$ * this ) libcfa_public { __leave( this ); }
 
 // Local Variables: //
libcfa/src/concurrency/monitor.hfa
--- r29d8c02
+++ r74ec742
@@ -119,7 +119,7 @@
 }
 
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
-void ?{}(__condition_criterion_t & this );
-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+// void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+// void ?{}(__condition_criterion_t & this );
+// void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
 
 struct condition {
libcfa/src/concurrency/preemption.cfa
--- r29d8c02
+++ r74ec742
@@ -38,5 +38,5 @@
 #endif
 
-__attribute__((weak)) Duration default_preemption() {
+__attribute__((weak)) Duration default_preemption() libcfa_public {
 	const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
 	if(!preempt_rate_s) {
@@ -238,5 +238,5 @@
 //----------
 // special case for preemption since used often
-__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
+__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
 	// create a assembler label before
 	// marked as clobber all to avoid movement
@@ -276,5 +276,5 @@
 // Get data from the TLS block
 // struct asm_region __cfaasm_get;
-uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
+uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
 uintptr_t __cfatls_get( unsigned long int offset ) {
 	// create a assembler label before
@@ -295,5 +295,5 @@
 extern "C" {
 	// Disable interrupts by incrementing the counter
-	void disable_interrupts() {
+	__attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
 		// create a assembler label before
 		// marked as clobber all to avoid movement
@@ -326,5 +326,5 @@
 	// Enable interrupts by decrementing the counter
 	// If counter reaches 0, execute any pending __cfactx_switch
-	void enable_interrupts( bool poll ) {
+	void enable_interrupts( bool poll ) libcfa_public {
 		// Cache the processor now since interrupts can start happening after the atomic store
 		processor * proc = __cfaabi_tls.this_processor;
@@ -362,5 +362,5 @@
 //-----------------------------------------------------------------------------
 // Kernel Signal Debug
-void __cfaabi_check_preemption() {
+void __cfaabi_check_preemption() libcfa_public {
 	bool ready = __preemption_enabled();
 	if(!ready) { abort("Preemption should be ready"); }
libcfa/src/concurrency/ready_subqueue.hfa
--- r29d8c02
+++ r74ec742
@@ -83,3 +83,4 @@
 	/* paranoid */ verify( node->link.ts != 0 );
 	/* paranoid */ verify( this.anchor.ts != 0 );
+	/* paranoid */ verify( (this.anchor.ts == MAX) == is_empty );
 	return [node, this.anchor.ts];
@@ -93,5 +94,5 @@
 // Return the timestamp
 static inline unsigned long long ts(__intrusive_lane_t & this) {
-	// Cannot verify here since it may not be locked
+	// Cannot verify 'emptiness' here since it may not be locked
 	/* paranoid */ verify(this.anchor.ts != 0);
 	return this.anchor.ts;
libcfa/src/concurrency/thread.cfa
--- r29d8c02
+++ r74ec742
@@ -26,4 +26,6 @@
 
 extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
+
+#pragma GCC visibility push(default)
 
 //-----------------------------------------------------------------------------