Changeset 90a8125 for libcfa/src/concurrency

- Timestamp: Jun 3, 2022, 3:10:01 PM (3 years ago)
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 7affcda
- Parents: bf0263c (diff), fc134a48 (diff)
- Location: libcfa/src/concurrency
- Files: 24 edited

Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent. The hunks below are shown against the first parent (bf0263c → 90a8125): removed lines are marked "-", added lines "+", and "…" separates hunks within a file.
libcfa/src/concurrency/alarm.cfa
  //=============================================================================================

- void sleep( Duration duration ) {
+ void sleep( Duration duration ) libcfa_public {
      alarm_node_t node = { active_thread(), duration, 0`s };
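The recurring change across this merge is symbol-visibility hygiene: internal helpers become `static`, and each function meant to be part of libcfa's exported API is tagged `libcfa_public`. The macro's definition is not part of this changeset; a minimal sketch of the GCC idiom it presumably expands to, with the library built with hidden visibility as the baseline:

    // Hypothetical sketch -- the real definition lives in libcfa's common
    // headers, outside this changeset. With -fvisibility=hidden as the
    // build-wide default, only tagged symbols are exported from libcfa.so.
    #define libcfa_public __attribute__((visibility("default")))

    void sleep( Duration duration ) libcfa_public;      // exported to user code
    static void __stack_clean( __stack_info_t * this ); // internal, never exported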
libcfa/src/concurrency/clib/cfathread.cfa
      typedef ThreadCancelled(cfathread_object) cfathread_exception;
-     typedef ThreadCancelled_vtable(cfathread_object) cfathread_vtable;
+     typedef vtable(ThreadCancelled(cfathread_object)) cfathread_vtable;

      void defaultResumptionHandler(ThreadCancelled(cfathread_object) & except) {
…
      typedef ThreadCancelled(__cfainit) __cfainit_exception;
-     typedef ThreadCancelled_vtable(__cfainit) __cfainit_vtable;
+     typedef vtable(ThreadCancelled(__cfainit)) __cfainit_vtable;

      void defaultResumptionHandler(ThreadCancelled(__cfainit) & except) {
…
  }

+ #pragma GCC visibility push(default)
+
  //================================================================================
  // Main Api
  extern "C" {
-     int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
+     int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
          *cl = new();
          return 0;
      }

-     cfathread_cluster_t cfathread_cluster_self(void) {
+     cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
          return active_cluster();
      }

-     int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
+     int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
          #if !defined(__CFA_NO_STATISTICS__)
              print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
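This file introduces the merge's second exporting mechanism: a `#pragma GCC visibility push(default)` placed just before the public-API section, so every definition that follows is exported without per-symbol tags (the `libcfa_public` tags on the functions below are then belt-and-braces). A sketch of how the pragma scopes, assuming a matching pop later in the translation unit (none appears in the hunks shown):

    static void helper(void) {}            // before the pragma: internal
    #pragma GCC visibility push(default)   // from here on, default = exported
    int cfathread_cluster_create(cfathread_cluster_t * cl);
    #pragma GCC visibility pop             // assumed; restores the hidden default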
libcfa/src/concurrency/coroutine.cfa
  //-----------------------------------------------------------------------------
  forall(T &)
- void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
+ void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
      dst->virtual_table = src->virtual_table;
      dst->the_coroutine = src->the_coroutine;
…
  forall(T &)
- const char * msg(CoroutineCancelled(T) *) {
+ const char * msg(CoroutineCancelled(T) *) libcfa_public {
      return "CoroutineCancelled(...)";
  }
…
  forall(T & | is_coroutine(T))
  void __cfaehm_cancelled_coroutine(
-     T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
+     T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) ) libcfa_public {
      verify( desc->cancellation );
      desc->state = Cancelled;
…
  void __stack_prepare( __stack_info_t * this, size_t create_size );
- void __stack_clean  ( __stack_info_t * this );
+ static void __stack_clean  ( __stack_info_t * this );

  //-----------------------------------------------------------------------------
…
  }

- void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
+ void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
      (this.context){0p, 0p};
      (this.stack){storage, storageSize};
…
  }

- void ^?{}(coroutine$& this) {
+ void ^?{}(coroutine$& this) libcfa_public {
      if(this.state != Halted && this.state != Start && this.state != Primed) {
          coroutine$ * src = active_coroutine();
…
  // Part of the Public API
  // Not inline since only ever called once per coroutine
- forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled,(T)); })
- void prime(T& cor) {
+ forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
+ void prime(T& cor) libcfa_public {
      coroutine$* this = get_coroutine(cor);
      assert(this->state == Start);
…
  }

- [void *, size_t] __stack_alloc( size_t storageSize ) {
+ static [void *, size_t] __stack_alloc( size_t storageSize ) {
      const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
      assert(__page_size != 0l);
…
  }

- void __stack_clean  ( __stack_info_t * this ) {
+ static void __stack_clean  ( __stack_info_t * this ) {
      void * storage = this->storage->limit;
…
  }

- void __stack_prepare( __stack_info_t * this, size_t create_size ) {
+ void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
      const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
      bool userStack;
libcfa/src/concurrency/coroutine.hfa
  //-----------------------------------------------------------------------------
  // Exception thrown from resume when a coroutine stack is cancelled.
- EHM_FORALL_EXCEPTION(CoroutineCancelled, (coroutine_t &), (coroutine_t)) (
+ forall(coroutine_t &)
+ exception CoroutineCancelled {
      coroutine_t * the_coroutine;
      exception_t * the_exception;
- );
+ };

  forall(T &)
…
  // Anything that implements this trait can be resumed.
  // Anything that is resumed is a coroutine.
- trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled,(T))) {
+ trait is_coroutine(T & | IS_RESUMPTION_EXCEPTION(CoroutineCancelled(T))) {
      void main(T & this);
      coroutine$ * get_coroutine(T & this);
…
  //-----------------------------------------------------------------------------
  // Public coroutine API
- forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled,(T)); })
+ forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
  void prime(T & cor);
…
  extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
- extern void __stack_clean  ( __stack_info_t * this );

  // Suspend implementation inlined for performance
…
  forall(T & | is_coroutine(T))
  void __cfaehm_cancelled_coroutine(
-     T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled,(T)) );
+     T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled(T)) );

  // Resume implementation inlined for performance
- forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled,(T)); })
+ forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled(T)); })
  static inline T & resume(T & cor) {
      // optimization : read TLS once and reuse it
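The second recurring change is syntactic: polymorphic exceptions lose the `EHM_FORALL_EXCEPTION` macro and the two-argument `EHM_DEFAULT_VTABLE(E,(T))` spelling, and are instead declared with first-class `forall ... exception` syntax and instantiated like any other polymorphic type, `E(T)`. A usage sketch of the new form, with `Cor` a hypothetical coroutine type (CoroutineCancelled is a resumption exception, hence `catchResume`):

    coroutine Cor { int result; };
    void main( Cor & this ) { suspend; }

    void caller( Cor & c ) {
        try {
            resume( c );
        } catchResume( CoroutineCancelled(Cor) * e ) {
            // e->the_coroutine and e->the_exception describe the cancellation
        }
    }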
libcfa/src/concurrency/exception.cfa
  extern "C" {

- struct exception_context_t * this_exception_context(void) {
+ struct exception_context_t * this_exception_context(void) libcfa_public {
      return &__get_stack( active_coroutine() )->exception_context;
  }

- _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
+ _Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
      _Unwind_Stop_Fn stop_func;
      void * stop_param;
libcfa/src/concurrency/invoke.c
  extern void enable_interrupts( _Bool poll );

- void __cfactx_invoke_coroutine(
+ libcfa_public void __cfactx_invoke_coroutine(
      void (*main)(void *),
      void *this
…
  }

- void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
+ libcfa_public void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) __attribute__ ((__noreturn__));
  void __cfactx_coroutine_unwind(struct _Unwind_Exception * storage, struct coroutine$ * cor) {
      _Unwind_Reason_Code ret = _Unwind_ForcedUnwind( storage, __cfactx_coroutine_unwindstop, cor );
…
- void __cfactx_invoke_thread(
+ libcfa_public void __cfactx_invoke_thread(
      void (*main)(void *),
      void *this
…
  }

- void __cfactx_start(
+ libcfa_public void __cfactx_start(
      void (*main)(void *),
      struct coroutine$ * cor,
libcfa/src/concurrency/io.cfa
      const unsigned long long ctsc = rdtscl();

-     if(proc->io.target == MAX) {
+     if(proc->io.target == UINT_MAX) {
          uint64_t chaos = __tls_rand();
          unsigned ext = chaos & 0xff;
…
      else {
          const unsigned target = proc->io.target;
-         /* paranoid */ verify( io.tscs[target].tv != MAX );
+         /* paranoid */ verify( io.tscs[target].tv != ULLONG_MAX );
          HELP: if(target < ctxs_count) {
              const unsigned long long cutoff = calc_cutoff(ctsc, ctx->cq.id, ctxs_count, io.data, io.tscs, __shard_factor.io);
…
              __STATS__( true, io.calls.helped++; )
          }
-         proc->io.target = MAX;
+         proc->io.target = UINT_MAX;
      }
  }
…
  // for convenience, return both the index and the pointer to the sqe
  // sqe == &sqes[idx]
- struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) {
+ struct $io_context * cfa_io_allocate(struct io_uring_sqe * sqes[], __u32 idxs[], __u32 want) libcfa_public {
      // __cfadbg_print_safe(io, "Kernel I/O : attempting to allocate %u\n", want);
…
  }

- void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) {
+ void cfa_io_submit( struct $io_context * inctx, __u32 idxs[], __u32 have, bool lazy ) __attribute__((nonnull (1))) libcfa_public {
      // __cfadbg_print_safe(io, "Kernel I/O : attempting to submit %u (%s)\n", have, lazy ? "lazy" : "eager");
libcfa/src/concurrency/io/call.cfa.in
  // I/O Interface
  //=============================================================================================
+ #pragma GCC visibility push(default)
  """
libcfa/src/concurrency/io/setup.cfa
  #if !defined(CFA_HAVE_LINUX_IO_URING_H)
-     void ?{}(io_context_params & this) {}
+     void ?{}(io_context_params & this) libcfa_public {}

      void ?{}($io_context & this, struct cluster & cl) {}
…
  #pragma GCC diagnostic pop

-     void ?{}(io_context_params & this) {
+     void ?{}(io_context_params & this) libcfa_public {
          this.num_entries = 256;
      }
libcfa/src/concurrency/io/types.hfa
  #pragma once

+ #include <limits.h>
+
  extern "C" {
      #include <linux/types.h>
…
  #include "iofwd.hfa"
  #include "kernel/fwd.hfa"
- #include "limits.hfa"

  #if defined(CFA_HAVE_LINUX_IO_URING_H)
…
      const __u32 tail = *this->cq.tail;

-     if(head == tail) return MAX;
+     if(head == tail) return ULLONG_MAX;

      return this->cq.ts;
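This hunk shows the merge's third theme: replacing libcfa's polymorphic `MAX` (from `limits.hfa`) with the fixed-type constants of C's `<limits.h>`, so the header no longer pulls in CFA-specific machinery and each sentinel is spelled to match its field's concrete type. As read from the surrounding diffs:

    unsigned           target;   // "no target"    -> UINT_MAX
    unsigned long long ts, tv;   // "no timestamp" -> ULLONG_MAX (or MAX where limits.hfa remains in scope)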
libcfa/src/concurrency/kernel.cfa
  // KERNEL_ONLY
- void returnToKernel() {
+ static void returnToKernel() {
      /* paranoid */ verify( ! __preemption_enabled() );
      coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
…
  }

- void unpark( thread$ * thrd, unpark_hint hint ) {
+ void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
      if( !thrd ) return;
…
  }

- void park( void ) {
+ void park( void ) libcfa_public {
      __disable_interrupts_checked();
      /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
…
  // KERNEL ONLY
- bool force_yield( __Preemption_Reason reason ) {
+ bool force_yield( __Preemption_Reason reason ) libcfa_public {
      __disable_interrupts_checked();
      thread$ * thrd = kernelTLS().this_thread;
…
  //-----------------------------------------------------------------------------
  // Debug
- bool threading_enabled(void) __attribute__((const)) {
+ bool threading_enabled(void) __attribute__((const)) libcfa_public {
      return true;
  }
…
  // Statistics
  #if !defined(__CFA_NO_STATISTICS__)
-     void print_halts( processor & this ) {
+     void print_halts( processor & this ) libcfa_public {
          this.print_halts = true;
      }
…
      }

-     void crawl_cluster_stats( cluster & this ) {
+     static void crawl_cluster_stats( cluster & this ) {
          // Stop the world, otherwise stats could get really messed-up
          // this doesn't solve all problems but does solve many
…
-     void print_stats_now( cluster & this, int flags ) {
+     void print_stats_now( cluster & this, int flags ) libcfa_public {
          crawl_cluster_stats( this );
          __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
libcfa/src/concurrency/kernel.hfa
  // Coroutine used py processors for the 2-step context switch
- coroutine processorCtx_t {
+ struct processorCtx_t {
+     struct coroutine$ self;
      struct processor * proc;
  };
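`processorCtx_t` stops using the `coroutine` type generator and instead writes out by hand roughly what that keyword produces, keeping the kernel in control of the layout. The pieces that make the hand-rolled struct satisfy `is_coroutine` land in kernel/private.hfa below; together they amount to:

    struct processorCtx_t {
        struct coroutine$ self;            // embedded coroutine descriptor
        struct processor * proc;
    };
    static inline coroutine$ * get_coroutine( processorCtx_t & this ) { return &this.self; }
    void main( processorCtx_t & );         // the coroutine body, declared privately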
libcfa/src/concurrency/kernel/cluster.cfa
  // returns the maximum number of processors the RWLock support
- __attribute__((weak)) unsigned __max_processors() {
+ __attribute__((weak)) unsigned __max_processors() libcfa_public {
      const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
      if(!max_cores_s) {
…
      if(is_empty(sl)) {
          assert( sl.anchor.next == 0p );
-         assert( sl.anchor.ts   == -1llu );
+         assert( sl.anchor.ts   == MAX );
          assert( mock_head(sl)  == sl.prev );
      } else {
          assert( sl.anchor.next != 0p );
-         assert( sl.anchor.ts   != -1llu );
+         assert( sl.anchor.ts   != MAX );
          assert( mock_head(sl)  != sl.prev );
      }
…
      /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count);
      it->rdq.id = valrq;
-     it->rdq.target = MAX;
+     it->rdq.target = UINT_MAX;
      valrq += __shard_factor.readyq;
      #if defined(CFA_HAVE_LINUX_IO_URING_H)
          it->io.ctx->cq.id = valio;
-         it->io.target = MAX;
+         it->io.target = UINT_MAX;
          valio += __shard_factor.io;
      #endif
…
      this.prev = mock_head(this);
      this.anchor.next = 0p;
-     this.anchor.ts   = -1llu;
+     this.anchor.ts   = MAX;
      #if !defined(__CFA_NO_STATISTICS__)
          this.cnt  = 0;
…
      /* paranoid */ verify( &mock_head(this)->link.ts == &this.anchor.ts );
      /* paranoid */ verify( mock_head(this)->link.next == 0p );
-     /* paranoid */ verify( mock_head(this)->link.ts   == -1llu );
+     /* paranoid */ verify( mock_head(this)->link.ts   == MAX );
      /* paranoid */ verify( mock_head(this) == this.prev );
      /* paranoid */ verify( __alignof__(__intrusive_lane_t) == 128 );
…
      // Make sure the list is empty
      /* paranoid */ verify( this.anchor.next == 0p );
-     /* paranoid */ verify( this.anchor.ts   == -1llu );
+     /* paranoid */ verify( this.anchor.ts   == MAX );
      /* paranoid */ verify( mock_head(this) == this.prev );
libcfa/src/concurrency/kernel/cluster.hfa
  #include "kernel/private.hfa"

- #include "limits.hfa"
+ #include <limits.h>

  //-----------------------------------------------------------------------
…
  static inline void touch_tsc(__timestamp_t * tscs, size_t idx, unsigned long long ts_prev, unsigned long long ts_next) {
-     if (ts_next == MAX) return;
+     if (ts_next == ULLONG_MAX) return;
      unsigned long long now = rdtscl();
      unsigned long long pma = __atomic_load_n(&tscs[ idx ].ma, __ATOMIC_RELAXED);
…
      for(i; shard_factor) {
          unsigned long long ptsc = ts(data[start + i]);
-         if(ptsc != -1ull) {
+         if(ptsc != ULLONG_MAX) {
              /* paranoid */ verify( start + i < count );
              unsigned long long tsc = moving_average(ctsc, ptsc, tscs[start + i].ma);
libcfa/src/concurrency/kernel/private.hfa
  //-----------------------------------------------------------------------------
  // Processor
- void main(processorCtx_t *);
+ void main(processorCtx_t &);
+ static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }

  void * __create_pthread( pthread_t *, void * (*)(void *), void * );
libcfa/src/concurrency/kernel/startup.cfa
  #endif

- cluster   * mainCluster;
+ cluster   * mainCluster libcfa_public;
  processor * mainProcessor;
  thread$   * mainThread;
…
  };

- void ?{}( current_stack_info_t & this ) {
+ static void ?{}( current_stack_info_t & this ) {
      __stack_context_t ctx;
      CtxGet( ctx );
…
  // Construct the processor context of the main processor
  void ?{}(processorCtx_t & this, processor * proc) {
-     (this.__cor){ "Processor" };
-     this.__cor.starter = 0p;
+     (this.self){ "Processor" };
+     this.self.starter = 0p;
      this.proc = proc;
  }
…
      self_mon_p = &self_mon;
      link.next = 0p;
-     link.ts = -1llu;
+     link.ts = MAX;
      preferred = ready_queue_new_preferred();
      last_proc = 0p;
…
  // Construct the processor context of non-main processors
  static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
-     (this.__cor){ info };
+     (this.self){ info };
      this.proc = proc;
  }
…
  }

- void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
+ void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
      ( this.terminated ){};
      ( this.runner ){};
…
  }

- void ?{}(processor & this, const char name[], cluster & _cltr) {
+ void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
      (this){name, _cltr, 0p};
  }

  extern size_t __page_size;
- void ^?{}(processor & this) with( this ){
+ void ^?{}(processor & this) libcfa_public with( this ){
      /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
      __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
…
  }

- void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
+ void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
      this.name = name;
      this.preemption_rate = preemption_rate;
…
  }

- void ^?{}(cluster & this) {
+ void ^?{}(cluster & this) libcfa_public {
      destroy(this.io.arbiter);
libcfa/src/concurrency/locks.cfa
  #include <stdlib.hfa>

+ #pragma GCC visibility push(default)
+
  //-----------------------------------------------------------------------------
  // info_thread
…
  }

- void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
+ static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
      thread$ * t = &try_pop_front( blocked_threads );
      owner = t;
…
  void ^?{}( alarm_node_wrap(L) & this ) { }

- void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
+ static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
      // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
      lock( cond->lock __cfaabi_dbg_ctx2 );
…
  // this casts the alarm node to our wrapped type since we used type erasure
- void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
+ static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
  }
…
  void ^?{}( condition_variable(L) & this ){ }

- void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
+ static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
      if(&popped != 0p) {
          popped.signalled = true;
…
  int counter( condition_variable(L) & this ) with(this) { return count; }

- size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
+ static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
      // add info_thread to waiting queue
      insert_last( blocked_threads, *i );
…
  // helper for wait()'s' with no timeout
- void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
+ static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      size_t recursion_count = queue_and_get_recursion(this, &i);
…
  // helper for wait()'s' with a timeout
- void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
+ static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
      lock( lock __cfaabi_dbg_ctx2 );
      size_t recursion_count = queue_and_get_recursion(this, &info);
…
  // fast_cond_var
  void ?{}( fast_cond_var(L) & this ){
-     this.blocked_threads{};
+     this.blocked_threads{};
      #ifdef __CFA_DEBUG__
      this.lock_used = 0p;
libcfa/src/concurrency/monitor.cfa
  static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );

+ static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+ static inline void ?{}(__condition_criterion_t & this );
+ static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+
  static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
  static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
…
  // Leave single monitor
- void __leave( monitor$ * this ) {
+ static void __leave( monitor$ * this ) {
      // Lock the monitor spinlock
      lock( this->lock __cfaabi_dbg_ctx2 );
…
  // Leave single monitor for the last time
- void __dtor_leave( monitor$ * this, bool join ) {
+ static void __dtor_leave( monitor$ * this, bool join ) {
      __cfaabi_dbg_debug_do(
          if( active_thread() != this->owner ) {
…
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+ void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
      thread$ * thrd = active_thread();
…
  }

- void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
+ void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
      this{ m, count, 0p };
  }

  // Dtor for monitor guard
- void ^?{}( monitor_guard_t & this ) {
+ void ^?{}( monitor_guard_t & this ) libcfa_public {
      // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
…
  // Ctor for monitor guard
  // Sorts monitors before entering
- void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
+ void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
      // optimization
      thread$ * thrd = active_thread();
…
  // Dtor for monitor guard
- void ^?{}( monitor_dtor_guard_t & this ) {
+ void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
      // Leave the monitors in order
      __dtor_leave( this.m, this.join );
…
  //-----------------------------------------------------------------------------
  // Internal scheduling types
- void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+ static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
      this.waiting_thread = waiting_thread;
      this.count = count;
…
- void ?{}(__condition_criterion_t & this ) with( this ) {
+ static void ?{}(__condition_criterion_t & this ) with( this ) {
      ready  = false;
      target = 0p;
…
- void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
+ static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
      this.ready  = false;
      this.target = target;
…
  //-----------------------------------------------------------------------------
  // Internal scheduling
- void wait( condition & this, uintptr_t user_info = 0 ) {
+ void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
      brand_condition( this );
…
  }

- bool signal( condition & this ) {
+ bool signal( condition & this ) libcfa_public {
      if( is_empty( this ) ) { return false; }
…
  }

- bool signal_block( condition & this ) {
+ bool signal_block( condition & this ) libcfa_public {
      if( !this.blocked.head ) { return false; }
…
  // Access the user_info of the thread waiting at the front of the queue
- uintptr_t front( condition & this ) {
+ uintptr_t front( condition & this ) libcfa_public {
      verifyf( !is_empty(this),
          "Attempt to access user data on an empty condition.\n"
…
  // setup mask
  // block
- void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
+ void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
      // This statment doesn't have a contiguous list of monitors...
      // Create one!
…
  // Can't be accepted since a mutex stmt is effectively an anonymous routine
  // Thus we do not need a monitor group
- void lock( monitor$ * this ) {
+ void lock( monitor$ * this ) libcfa_public {
      thread$ * thrd = active_thread();
…
  // Leave routine for mutex stmt
  // Is just a wrapper around __leave for the is_lock trait to see
- void unlock( monitor$ * this ) { __leave( this ); }
+ void unlock( monitor$ * this ) libcfa_public { __leave( this ); }

  // Local Variables: //
libcfa/src/concurrency/monitor.hfa
  }

- void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
- void ?{}(__condition_criterion_t & this );
- void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+ // void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+ // void ?{}(__condition_criterion_t & this );
+ // void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );

  struct condition {
libcfa/src/concurrency/preemption.cfa
  #endif

- __attribute__((weak)) Duration default_preemption() {
+ __attribute__((weak)) Duration default_preemption() libcfa_public {
      const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
      if(!preempt_rate_s) {
…
  //----------
  // special case for preemption since used often
- __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
+ __attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
      // create a assembler label before
      // marked as clobber all to avoid movement
…
  // Get data from the TLS block
  // struct asm_region __cfaasm_get;
- uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
+ uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
  uintptr_t __cfatls_get( unsigned long int offset ) {
      // create a assembler label before
…
  extern "C" {
      // Disable interrupts by incrementing the counter
-     void disable_interrupts() {
+     __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
          // create a assembler label before
          // marked as clobber all to avoid movement
…
      // Enable interrupts by decrementing the counter
      // If counter reaches 0, execute any pending __cfactx_switch
-     void enable_interrupts( bool poll ) {
+     void enable_interrupts( bool poll ) libcfa_public {
          // Cache the processor now since interrupts can start happening after the atomic store
          processor * proc = __cfaabi_tls.this_processor;
…
  //-----------------------------------------------------------------------------
  // Kernel Signal Debug
- void __cfaabi_check_preemption() {
+ void __cfaabi_check_preemption() libcfa_public {
      bool ready = __preemption_enabled();
      if(!ready) { abort("Preemption should be ready"); }
libcfa/src/concurrency/ready_queue.cfa
      const unsigned long long ctsc = rdtscl();

-     if(proc->rdq.target == MAX) {
+     if(proc->rdq.target == UINT_MAX) {
          uint64_t chaos = __tls_rand();
          unsigned ext = chaos & 0xff;
…
          const unsigned target = proc->rdq.target;
          __cfadbg_print_safe(ready_queue, "Kernel : %u considering helping %u, tcsc %llu\n", this, target, readyQ.tscs[target].tv);
-         /* paranoid */ verify( readyQ.tscs[target].tv != MAX );
+         /* paranoid */ verify( readyQ.tscs[target].tv != ULLONG_MAX );
          if(target < lanes_count) {
              const unsigned long long cutoff = calc_cutoff(ctsc, proc->rdq.id, lanes_count, cltr->sched.readyQ.data, cltr->sched.readyQ.tscs, __shard_factor.readyq);
…
          }
      }
-     proc->rdq.target = MAX;
+     proc->rdq.target = UINT_MAX;
…
  // get preferred ready for new thread
  unsigned ready_queue_new_preferred() {
-     unsigned pref = MAX;
+     unsigned pref = UINT_MAX;
      if(struct thread$ * thrd = publicTLS_get( this_thread )) {
          pref = thrd->preferred;
libcfa/src/concurrency/ready_subqueue.hfa
      /* paranoid */ verify( this.lock );
      /* paranoid */ verify( node->link.next == 0p );
-     /* paranoid */ verify( node->link.ts == MAX );
+     /* paranoid */ verify( __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED) == MAX );
      /* paranoid */ verify( this.prev->link.next == 0p );
-     /* paranoid */ verify( this.prev->link.ts == MAX );
+     /* paranoid */ verify( __atomic_load_n(&this.prev->link.ts, __ATOMIC_RELAXED) == MAX );
      if( this.anchor.next == 0p ) {
          /* paranoid */ verify( this.anchor.next == 0p );
-         /* paranoid */ verify( this.anchor.ts == MAX );
-         /* paranoid */ verify( this.anchor.ts != 0 );
+         /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) == MAX );
+         /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 );
          /* paranoid */ verify( this.prev == mock_head( this ) );
      } else {
          /* paranoid */ verify( this.anchor.next != 0p );
-         /* paranoid */ verify( this.anchor.ts != MAX );
-         /* paranoid */ verify( this.anchor.ts != 0 );
+         /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
+         /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 );
          /* paranoid */ verify( this.prev != mock_head( this ) );
      }
…
      /* paranoid */ verify( this.lock );
      /* paranoid */ verify( this.anchor.next != 0p );
-     /* paranoid */ verify( this.anchor.ts != MAX );
-     /* paranoid */ verify( this.anchor.ts != 0 );
+     /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != MAX );
+     /* paranoid */ verify( __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED) != 0 );

      // Get the relevant nodes locally
      thread$ * node = this.anchor.next;
      this.anchor.next = node->link.next;
-     this.anchor.ts = node->link.ts;
+     __atomic_store_n(&this.anchor.ts, __atomic_load_n(&node->link.ts, __ATOMIC_RELAXED), __ATOMIC_RELAXED);
      bool is_empty = this.anchor.next == 0p;
      node->link.next = 0p;
-     node->link.ts = MAX;
+     __atomic_store_n(&node->link.ts, ULLONG_MAX, __ATOMIC_RELAXED);
      #if !defined(__CFA_NO_STATISTICS__)
          this.cnt--;
…
      if(is_empty) this.prev = mock_head( this );

+     unsigned long long ats = __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
      /* paranoid */ verify( node->link.next == 0p );
-     /* paranoid */ verify( node->link.ts == MAX );
-     /* paranoid */ verify( node->link.ts != 0 );
-     /* paranoid */ verify( this.anchor.ts != 0 );
-     /* paranoid */ verify( ( this.anchor.ts == MAX ) == is_empty );
-     return [node, this.anchor.ts];
+     /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) == MAX );
+     /* paranoid */ verify( __atomic_load_n(&node->link.ts , __ATOMIC_RELAXED) != 0 );
+     /* paranoid */ verify( ats != 0 );
+     /* paranoid */ verify( (ats == MAX) == is_empty );
+     return [node, ats];
…
      // Cannot verify 'emptiness' here since it may not be locked
      /* paranoid */ verify(this.anchor.ts != 0);
-     return this.anchor.ts;
+     /* paranoid */ static_assert(__atomic_always_lock_free(sizeof(this.anchor.ts), &this.anchor.ts));
+     return __atomic_load_n(&this.anchor.ts, __ATOMIC_RELAXED);
  }
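Here every cross-thread access to the `ts` timestamps goes through GCC's `__atomic` builtins: the field is read by other processors without holding the lane lock, so the previous plain loads and stores were formally a data race, while `__ATOMIC_RELAXED` suffices because the value only steers scheduling heuristics. The pattern in isolation (a C sketch with hypothetical names, not libcfa API):

    // Writer runs under the lane lock; readers may not hold it.
    static void ts_write( unsigned long long * ts, unsigned long long v ) {
        __atomic_store_n( ts, v, __ATOMIC_RELAXED );
    }
    static unsigned long long ts_read( unsigned long long * ts ) {
        return __atomic_load_n( ts, __ATOMIC_RELAXED );
    }
    // The static_assert(__atomic_always_lock_free(...)) added above guards
    // that these relaxed accesses compile to plain machine loads/stores.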
libcfa/src/concurrency/thread.cfa
  #include "thread.hfa"

+ #include "exception.hfa"
  #include "kernel/private.hfa"
- #include "exception.hfa"
+ #include "limits.hfa"

  #define __CFA_INVOKE_PRIVATE__
…
  extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;

+ #pragma GCC visibility push(default)
+
  //-----------------------------------------------------------------------------
…
      curr_cluster = &cl;
      link.next = 0p;
-     link.ts = -1llu;
+     link.ts = MAX;
      preferred = ready_queue_new_preferred();
      last_proc = 0p;
…
  }

- forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled,(T))
-     | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })
+ forall(T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
+     | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
  void ?{}( thread_dtor_guard_t & this,
      T & thrd, void(*cancelHandler)(ThreadCancelled(T) &)) {
…
  //-----------------------------------------------------------------------------
- forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled,(T))
-     | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })
+ forall(T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
+     | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
  T & join( T & this ) {
      thread_dtor_guard_t guard = { this, defaultResumptionHandler };
libcfa/src/concurrency/thread.hfa
  };

- EHM_FORALL_EXCEPTION(ThreadCancelled, (thread_t &), (thread_t)) (
+ forall(thread_t &)
+ exception ThreadCancelled {
      thread_t * the_thread;
      exception_t * the_exception;
- );
+ };

  forall(T &)
…
  };

- forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled,(T))
-     | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })
+ forall( T & | is_thread(T) | IS_EXCEPTION(ThreadCancelled(T))
+     | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
  void ?{}( thread_dtor_guard_t & this, T & thrd, void(*)(ThreadCancelled(T) &) );
  void ^?{}( thread_dtor_guard_t & this );
…
  //----------
  // join
- forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled,(T))
-     | { EHM_DEFAULT_VTABLE(ThreadCancelled,(T)); })
+ forall( T & | is_thread(T) | IS_RESUMPTION_EXCEPTION(ThreadCancelled(T))
+     | { EHM_DEFAULT_VTABLE(ThreadCancelled(T)); })
  T & join( T & this );