Changeset c18bf9e
- Timestamp: May 16, 2022, 12:08:36 PM
- Branches: ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children: 4bb5d36
- Parents: 108345a
- Location: libcfa/src
- Files: 16 edited
Legend:
- Unmodified lines are shown with no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
libcfa/src/bits/weakso_locks.cfa
@@ -18,4 +18,6 @@
 #include "bits/weakso_locks.hfa"
 
+#pragma GCC visibility push(default)
+
 void ?{}( blocking_lock &, bool, bool ) {}
 void ^?{}( blocking_lock & ) {}
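The pattern above recurs through the rest of the changeset, so a short aside: below is a minimal sketch of the two export mechanisms in play, assuming the library is compiled with -fvisibility=hidden so that symbols are hidden unless explicitly marked. The function names are illustrative, and libcfa_public is presumably a macro wrapping the visibility attribute; only the pragma and attribute spellings are taken from the diff itself.

    // Region form, as added above: every definition between push and pop
    // is given default (exported) visibility.
    #pragma GCC visibility push(default)
    void exported_api( void ) {}
    #pragma GCC visibility pop

    // Per-symbol form: what a marker like libcfa_public presumably expands to.
    __attribute__((visibility("default"))) void also_exported( void ) {}

    // Internal linkage: a static definition never reaches the dynamic
    // symbol table at all, regardless of visibility settings.
    static void helper( void ) {}

Hiding symbols by default shrinks the dynamic symbol table and keeps applications from binding to runtime internals; the remaining files follow the same recipe, exporting the public API explicitly and demoting file-local helpers to static. The effect can be verified with nm -D --defined-only on the built library.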
libcfa/src/concurrency/alarm.cfa
@@ -141,5 +141,5 @@
 //=============================================================================================
 
-void sleep( Duration duration ) {
+void sleep( Duration duration ) libcfa_public {
    alarm_node_t node = { active_thread(), duration, 0`s };
 
libcfa/src/concurrency/clib/cfathread.cfa
@@ -326,17 +326,19 @@
 }
 
+#pragma GCC visibility push(default)
+
 //================================================================================
 // Main Api
 extern "C" {
-    int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) {
+    int cfathread_cluster_create(cfathread_cluster_t * cl) __attribute__((nonnull(1))) libcfa_public {
        *cl = new();
        return 0;
    }
 
-    cfathread_cluster_t cfathread_cluster_self(void) {
+    cfathread_cluster_t cfathread_cluster_self(void) libcfa_public {
        return active_cluster();
    }
 
-    int cfathread_cluster_print_stats( cfathread_cluster_t cl ) {
+    int cfathread_cluster_print_stats( cfathread_cluster_t cl ) libcfa_public {
    #if !defined(__CFA_NO_STATISTICS__)
        print_stats_at_exit( *cl, CFA_STATS_READY_Q | CFA_STATS_IO );
libcfa/src/concurrency/coroutine.cfa
@@ -48,5 +48,5 @@
 //-----------------------------------------------------------------------------
 forall(T &)
-void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) {
+void copy(CoroutineCancelled(T) * dst, CoroutineCancelled(T) * src) libcfa_public {
    dst->virtual_table = src->virtual_table;
    dst->the_coroutine = src->the_coroutine;
@@ -55,5 +55,5 @@
 
 forall(T &)
-const char * msg(CoroutineCancelled(T) *) {
+const char * msg(CoroutineCancelled(T) *) libcfa_public {
    return "CoroutineCancelled(...)";
 }
@@ -62,5 +62,5 @@
 forall(T & | is_coroutine(T))
 void __cfaehm_cancelled_coroutine(
-        T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) {
+        T & cor, coroutine$ * desc, EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)) ) libcfa_public {
    verify( desc->cancellation );
    desc->state = Cancelled;
@@ -89,5 +89,5 @@
 
 void __stack_prepare( __stack_info_t * this, size_t create_size );
-void __stack_clean  ( __stack_info_t * this );
+static void __stack_clean  ( __stack_info_t * this );
 
 //-----------------------------------------------------------------------------
@@ -114,5 +114,5 @@
 }
 
-void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) with( this ) {
+void ?{}( coroutine$ & this, const char name[], void * storage, size_t storageSize ) libcfa_public with( this ) {
    (this.context){0p, 0p};
    (this.stack){storage, storageSize};
@@ -124,5 +124,5 @@
 }
 
-void ^?{}(coroutine$& this) {
+void ^?{}(coroutine$& this) libcfa_public {
    if(this.state != Halted && this.state != Start && this.state != Primed) {
        coroutine$ * src = active_coroutine();
@@ -147,5 +147,5 @@
 // Not inline since only ever called once per coroutine
 forall(T & | is_coroutine(T) | { EHM_DEFAULT_VTABLE(CoroutineCancelled, (T)); })
-void prime(T& cor) {
+void prime(T& cor) libcfa_public {
    coroutine$* this = get_coroutine(cor);
    assert(this->state == Start);
@@ -155,5 +155,5 @@
 }
 
-[void *, size_t] __stack_alloc( size_t storageSize ) {
+static [void *, size_t] __stack_alloc( size_t storageSize ) {
    const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    assert(__page_size != 0l);
@@ -193,5 +193,5 @@
 }
 
-void __stack_clean  ( __stack_info_t * this ) {
+static void __stack_clean  ( __stack_info_t * this ) {
    void * storage = this->storage->limit;
 
@@ -215,5 +215,5 @@
 }
 
-void __stack_prepare( __stack_info_t * this, size_t create_size ) {
+void __stack_prepare( __stack_info_t * this, size_t create_size ) libcfa_public {
    const size_t stack_data_size = libCeiling( sizeof(__stack_t), 16 ); // minimum alignment
    bool userStack;
libcfa/src/concurrency/coroutine.hfa
@@ -113,6 +113,4 @@
 
 extern void __stack_prepare( __stack_info_t * this, size_t size /* ignored if storage already allocated */);
-extern void __stack_clean  ( __stack_info_t * this );
-
 
 // Suspend implementation inlined for performance
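This header change is forced by coroutine.cfa above: once the definition of __stack_clean becomes static, a leftover extern declaration in the header would give the name external linkage first, and the later static definition would then have conflicting linkage (undefined behavior per C11 6.2.2). A compilable sketch of the corrected arrangement, with __stack_info_t left opaque:

    struct __stack_info_t;
    // declaration and definition both static, local to the one .cfa file
    static void __stack_clean( struct __stack_info_t * this );
    static void __stack_clean( struct __stack_info_t * this ) {
        (void)this;  // body elided in this sketch
    }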
libcfa/src/concurrency/exception.cfa
@@ -64,9 +64,9 @@
 extern "C" {
 
-struct exception_context_t * this_exception_context(void) {
+struct exception_context_t * this_exception_context(void) libcfa_public {
    return &__get_stack( active_coroutine() )->exception_context;
 }
 
-_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) {
+_Unwind_Reason_Code __cfaehm_cancellation_unwind( struct _Unwind_Exception * unwind_exception ) libcfa_public {
    _Unwind_Stop_Fn stop_func;
    void * stop_param;
libcfa/src/concurrency/kernel.cfa
@@ -389,5 +389,5 @@
 
 // KERNEL_ONLY
-void returnToKernel() {
+static void returnToKernel() {
    /* paranoid */ verify( ! __preemption_enabled() );
    coroutine$ * proc_cor = get_coroutine(kernelTLS().this_processor->runner);
@@ -547,5 +547,5 @@
 }
 
-void unpark( thread$ * thrd, unpark_hint hint ) {
+void unpark( thread$ * thrd, unpark_hint hint ) libcfa_public {
    if( !thrd ) return;
 
@@ -558,5 +558,5 @@
 }
 
-void park( void ) {
+void park( void ) libcfa_public {
    __disable_interrupts_checked();
    /* paranoid */ verify( kernelTLS().this_thread->preempted == __NO_PREEMPTION );
@@ -601,5 +601,5 @@
 
 // KERNEL ONLY
-bool force_yield( __Preemption_Reason reason ) {
+bool force_yield( __Preemption_Reason reason ) libcfa_public {
    __disable_interrupts_checked();
    thread$ * thrd = kernelTLS().this_thread;
@@ -849,5 +849,5 @@
 //-----------------------------------------------------------------------------
 // Debug
-bool threading_enabled(void) __attribute__((const)) {
+bool threading_enabled(void) __attribute__((const)) libcfa_public {
    return true;
 }
@@ -856,5 +856,5 @@
 // Statistics
 #if !defined(__CFA_NO_STATISTICS__)
-    void print_halts( processor & this ) {
+    void print_halts( processor & this ) libcfa_public {
        this.print_halts = true;
    }
@@ -873,5 +873,5 @@
    }
 
-    void crawl_cluster_stats( cluster & this ) {
+    static void crawl_cluster_stats( cluster & this ) {
        // Stop the world, otherwise stats could get really messed-up
        // this doesn't solve all problems but does solve many
@@ -889,5 +889,5 @@
 
 
-    void print_stats_now( cluster & this, int flags ) {
+    void print_stats_now( cluster & this, int flags ) libcfa_public {
        crawl_cluster_stats( this );
        __print_stats( this.stats, flags, "Cluster", this.name, (void*)&this );
libcfa/src/concurrency/kernel.hfa
@@ -49,5 +49,7 @@
 
 // Coroutine used py processors for the 2-step context switch
-coroutine processorCtx_t {
+
+struct processorCtx_t {
+    struct coroutine$ self;
    struct processor * proc;
 };
libcfa/src/concurrency/kernel/cluster.cfa
@@ -49,5 +49,5 @@
 
 // returns the maximum number of processors the RWLock support
-__attribute__((weak)) unsigned __max_processors() {
+__attribute__((weak)) unsigned __max_processors() libcfa_public {
    const char * max_cores_s = getenv("CFA_MAX_PROCESSORS");
    if(!max_cores_s) {
libcfa/src/concurrency/kernel/private.hfa
@@ -109,5 +109,6 @@
 //-----------------------------------------------------------------------------
 // Processor
-void main(processorCtx_t *);
+void main(processorCtx_t &);
+static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
 
 void * __create_pthread( pthread_t *, void * (*)(void *), void * );
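Together with the kernel.hfa hunk earlier, this completes the conversion of processorCtx_t from a `coroutine` declaration to its hand-written equivalent: the struct now embeds the coroutine$ descriptor the keyword would have generated, and this private header supplies the get_coroutine accessor the coroutine machinery requires, as a static inline so no symbol is exported. Reassembled from the two hunks (Cforall):

    struct processorCtx_t {
        struct coroutine$ self;     // descriptor formerly generated by `coroutine`
        struct processor * proc;
    };
    // satisfies the coroutine interface without exporting a symbol
    static inline coroutine$* get_coroutine(processorCtx_t & this) { return &this.self; }
    void main(processorCtx_t &);    // coroutine body, defined in the runtime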
libcfa/src/concurrency/kernel/startup.cfa
@@ -120,5 +120,5 @@
 #endif
 
-cluster * mainCluster;
+cluster * mainCluster libcfa_public;
 processor * mainProcessor;
 thread$ * mainThread;
@@ -169,5 +169,5 @@
 };
 
-void ?{}( current_stack_info_t & this ) {
+static void ?{}( current_stack_info_t & this ) {
    __stack_context_t ctx;
    CtxGet( ctx );
@@ -209,6 +209,6 @@
 // Construct the processor context of the main processor
 void ?{}(processorCtx_t & this, processor * proc) {
-    (this.__cor){ "Processor" };
-    this.__cor.starter = 0p;
+    (this.self){ "Processor" };
+    this.self.starter = 0p;
    this.proc = proc;
 }
@@ -526,5 +526,5 @@
 // Construct the processor context of non-main processors
 static void ?{}(processorCtx_t & this, processor * proc, current_stack_info_t * info) {
-    (this.__cor){ info };
+    (this.self){ info };
    this.proc = proc;
 }
@@ -578,5 +578,5 @@
 }
 
-void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) {
+void ?{}(processor & this, const char name[], cluster & _cltr, thread$ * initT) libcfa_public {
    ( this.terminated ){};
    ( this.runner ){};
@@ -591,10 +591,10 @@
 }
 
-void ?{}(processor & this, const char name[], cluster & _cltr) {
+void ?{}(processor & this, const char name[], cluster & _cltr) libcfa_public {
    (this){name, _cltr, 0p};
 }
 
 extern size_t __page_size;
-void ^?{}(processor & this) with( this ){
+void ^?{}(processor & this) libcfa_public with( this ){
    /* paranoid */ verify( !__atomic_load_n(&do_terminate, __ATOMIC_ACQUIRE) );
    __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this);
@@ -623,5 +623,5 @@
 }
 
-void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) with( this ) {
+void ?{}(cluster & this, const char name[], Duration preemption_rate, unsigned num_io, const io_context_params & io_params) libcfa_public with( this ) {
    this.name = name;
    this.preemption_rate = preemption_rate;
@@ -658,5 +658,5 @@
 }
 
-void ^?{}(cluster & this) {
+void ^?{}(cluster & this) libcfa_public {
    destroy(this.io.arbiter);
 
libcfa/src/concurrency/locks.cfa
@@ -24,4 +24,6 @@
 #include <stdlib.hfa>
 
+#pragma GCC visibility push(default)
+
 //-----------------------------------------------------------------------------
 // info_thread
@@ -116,5 +118,5 @@
 }
 
-void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
+static void pop_and_set_new_owner( blocking_lock & this ) with( this ) {
    thread$ * t = &try_pop_front( blocked_threads );
    owner = t;
@@ -192,5 +194,5 @@
 void ^?{}( alarm_node_wrap(L) & this ) { }
 
-void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
+static void timeout_handler ( alarm_node_wrap(L) & this ) with( this ) {
    // This condition_variable member is called from the kernel, and therefore, cannot block, but it can spin.
    lock( cond->lock __cfaabi_dbg_ctx2 );
@@ -216,5 +218,5 @@
 
    // this casts the alarm node to our wrapped type since we used type erasure
-    void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
+    static void alarm_node_wrap_cast( alarm_node_t & a ) { timeout_handler( (alarm_node_wrap(L) &)a ); }
 }
 
@@ -233,5 +235,5 @@
 void ^?{}( condition_variable(L) & this ){ }
 
-void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
+static void process_popped( condition_variable(L) & this, info_thread(L) & popped ) with( this ) {
    if(&popped != 0p) {
        popped.signalled = true;
@@ -278,5 +280,5 @@
 
 int counter( condition_variable(L) & this ) with(this) { return count; }
-size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
+static size_t queue_and_get_recursion( condition_variable(L) & this, info_thread(L) * i ) with(this) {
    // add info_thread to waiting queue
    insert_last( blocked_threads, *i );
@@ -291,5 +293,5 @@
 
    // helper for wait()'s' with no timeout
-    void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
+    static void queue_info_thread( condition_variable(L) & this, info_thread(L) & i ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        size_t recursion_count = queue_and_get_recursion(this, &i);
@@ -308,5 +310,5 @@
 
    // helper for wait()'s' with a timeout
-    void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
+    static void queue_info_thread_timeout( condition_variable(L) & this, info_thread(L) & info, Duration t, Alarm_Callback callback ) with(this) {
        lock( lock __cfaabi_dbg_ctx2 );
        size_t recursion_count = queue_and_get_recursion(this, &info);
@@ -343,5 +345,5 @@
 // fast_cond_var
 void ?{}( fast_cond_var(L) & this ){
-    this.blocked_threads{}; 
+    this.blocked_threads{};
    #ifdef __CFA_DEBUG__
    this.lock_used = 0p;
libcfa/src/concurrency/monitor.cfa
@@ -44,4 +44,8 @@
 static inline void restore( monitor$ * ctx [], __lock_size_t count, __spinlock_t * locks [], unsigned int /*in */ recursions [], __waitfor_mask_t /*in */ masks [] );
 
+static inline void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+static inline void ?{}(__condition_criterion_t & this );
+static inline void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+
 static inline void init     ( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
 static inline void init_push( __lock_size_t count, monitor$ * monitors [], __condition_node_t & waiter, __condition_criterion_t criteria [] );
@@ -243,5 +247,5 @@
 
 // Leave single monitor
-void __leave( monitor$ * this ) {
+static void __leave( monitor$ * this ) {
    // Lock the monitor spinlock
    lock( this->lock __cfaabi_dbg_ctx2 );
@@ -278,5 +282,5 @@
 
 // Leave single monitor for the last time
-void __dtor_leave( monitor$ * this, bool join ) {
+static void __dtor_leave( monitor$ * this, bool join ) {
    __cfaabi_dbg_debug_do(
        if( active_thread() != this->owner ) {
@@ -344,5 +348,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count, fptr_t func ) libcfa_public {
    thread$ * thrd = active_thread();
 
@@ -369,5 +373,5 @@
 }
 
-void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) {
+void ?{}( monitor_guard_t & this, monitor$ * m [], __lock_size_t count ) libcfa_public {
    this{ m, count, 0p };
 }
@@ -375,5 +379,5 @@
 
 // Dtor for monitor guard
-void ^?{}( monitor_guard_t & this ) {
+void ^?{}( monitor_guard_t & this ) libcfa_public {
    // __cfaabi_dbg_print_safe( "MGUARD : leaving %d\n", this.count);
 
@@ -389,5 +393,5 @@
 // Ctor for monitor guard
 // Sorts monitors before entering
-void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) {
+void ?{}( monitor_dtor_guard_t & this, monitor$ * m [], fptr_t func, bool join ) libcfa_public {
    // optimization
    thread$ * thrd = active_thread();
@@ -409,5 +413,5 @@
 
 // Dtor for monitor guard
-void ^?{}( monitor_dtor_guard_t & this ) {
+void ^?{}( monitor_dtor_guard_t & this ) libcfa_public {
    // Leave the monitors in order
    __dtor_leave( this.m, this.join );
@@ -419,5 +423,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling types
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
+static void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info ) {
    this.waiting_thread = waiting_thread;
    this.count = count;
@@ -426,5 +430,5 @@
 }
 
-void ?{}(__condition_criterion_t & this ) with( this ) {
+static void ?{}(__condition_criterion_t & this ) with( this ) {
    ready = false;
    target = 0p;
@@ -433,5 +437,5 @@
 }
 
-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
+static void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t & owner ) {
    this.ready = false;
    this.target = target;
@@ -442,5 +446,5 @@
 //-----------------------------------------------------------------------------
 // Internal scheduling
-void wait( condition & this, uintptr_t user_info = 0 ) {
+void wait( condition & this, uintptr_t user_info = 0 ) libcfa_public {
    brand_condition( this );
 
@@ -496,5 +500,5 @@
 }
 
-bool signal( condition & this ) {
+bool signal( condition & this ) libcfa_public {
    if( is_empty( this ) ) { return false; }
 
@@ -538,5 +542,5 @@
 }
 
-bool signal_block( condition & this ) {
+bool signal_block( condition & this ) libcfa_public {
    if( !this.blocked.head ) { return false; }
 
@@ -586,5 +590,5 @@
 
 // Access the user_info of the thread waiting at the front of the queue
-uintptr_t front( condition & this ) {
+uintptr_t front( condition & this ) libcfa_public {
    verifyf( !is_empty(this),
        "Attempt to access user data on an empty condition.\n"
@@ -608,5 +612,5 @@
    // setup mask
    // block
-void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) {
+void __waitfor_internal( const __waitfor_mask_t & mask, int duration ) libcfa_public {
    // This statment doesn't have a contiguous list of monitors...
    // Create one!
@@ -994,5 +998,5 @@
 // Can't be accepted since a mutex stmt is effectively an anonymous routine
 // Thus we do not need a monitor group
-void lock( monitor$ * this ) {
+void lock( monitor$ * this ) libcfa_public {
    thread$ * thrd = active_thread();
 
@@ -1046,5 +1050,5 @@
 // Leave routine for mutex stmt
 // Is just a wrapper around __leave for the is_lock trait to see
-void unlock( monitor$ * this ) { __leave( this ); }
+void unlock( monitor$ * this ) libcfa_public { __leave( this ); }
 
 // Local Variables: //
libcfa/src/concurrency/monitor.hfa
@@ -119,7 +119,7 @@
 }
 
-void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
-void ?{}(__condition_criterion_t & this );
-void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
+// void ?{}(__condition_node_t & this, thread$ * waiting_thread, __lock_size_t count, uintptr_t user_info );
+// void ?{}(__condition_criterion_t & this );
+// void ?{}(__condition_criterion_t & this, monitor$ * target, __condition_node_t * owner );
 
 struct condition {
libcfa/src/concurrency/preemption.cfa
@@ -38,5 +38,5 @@
 #endif
 
-__attribute__((weak)) Duration default_preemption() {
+__attribute__((weak)) Duration default_preemption() libcfa_public {
    const char * preempt_rate_s = getenv("CFA_DEFAULT_PREEMPTION");
    if(!preempt_rate_s) {
@@ -238,5 +238,5 @@
 //----------
 // special case for preemption since used often
-__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() {
+__attribute__((optimize("no-reorder-blocks"))) bool __preemption_enabled() libcfa_public {
    // create a assembler label before
    // marked as clobber all to avoid movement
@@ -276,5 +276,5 @@
 // Get data from the TLS block
 // struct asm_region __cfaasm_get;
-uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__)); //no inline to avoid problems
+uintptr_t __cfatls_get( unsigned long int offset ) __attribute__((__noinline__, visibility("default"))); //no inline to avoid problems
 uintptr_t __cfatls_get( unsigned long int offset ) {
    // create a assembler label before
@@ -295,5 +295,5 @@
 extern "C" {
    // Disable interrupts by incrementing the counter
-    void disable_interrupts(){
+    __attribute__((__noinline__, visibility("default"))) void disable_interrupts() libcfa_public {
        // create a assembler label before
        // marked as clobber all to avoid movement
@@ -326,5 +326,5 @@
    // Enable interrupts by decrementing the counter
    // If counter reaches 0, execute any pending __cfactx_switch
-    void enable_interrupts( bool poll ) {
+    void enable_interrupts( bool poll ) libcfa_public {
        // Cache the processor now since interrupts can start happening after the atomic store
        processor * proc = __cfaabi_tls.this_processor;
@@ -362,5 +362,5 @@
 //-----------------------------------------------------------------------------
 // Kernel Signal Debug
-void __cfaabi_check_preemption() {
+void __cfaabi_check_preemption() libcfa_public {
    bool ready = __preemption_enabled();
    if(!ready) { abort("Preemption should be ready"); }
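Note the two spellings in this file: most definitions take the libcfa_public marker, while __cfatls_get attaches visibility("default") to a separate prototype. GCC merges attributes from earlier declarations into the definition, so annotating the prototype is sufficient; a self-contained sketch with a placeholder body:

    #include <stdint.h>

    // the attribute rides on the declaration; the definition below
    // inherits both noinline and default visibility
    uintptr_t __cfatls_get( unsigned long int offset )
        __attribute__((__noinline__, visibility("default")));

    uintptr_t __cfatls_get( unsigned long int offset ) {
        return offset;  // placeholder; the real function reads a TLS slot
    }

These symbols stay visible even in a hidden-by-default build, presumably because TLS access and interrupt enable/disable are referenced from code the Cforall compiler emits into user programs.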
libcfa/src/concurrency/thread.cfa
@@ -26,4 +26,6 @@
 
 extern uint32_t __global_random_seed, __global_random_prime, __global_random_mask;
+
+#pragma GCC visibility push(default)
 
 //-----------------------------------------------------------------------------