Changeset 6b33e89 for libcfa/src/concurrency/kernel
- Timestamp:
- Apr 25, 2025, 7:39:09 AM (5 months ago)
- Branches:
- master
- Children:
- 65bd3c2
- Parents:
- b195498
- Location:
- libcfa/src/concurrency/kernel
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel/cluster.cfa
rb195498 r6b33e89 234 234 235 235 static void assign_list(unsigned & valrq, unsigned & valio, dlist(struct processor) & list, unsigned count) { 236 struct processor * it = & list`first;236 struct processor * it = &first( list ); 237 237 for(unsigned i = 0; i < count; i++) { 238 238 /* paranoid */ verifyf( it, "Unexpected null iterator, at index %u of %u\n", i, count); … … 245 245 valio += __shard_factor.io; 246 246 #endif 247 it = & (*it)`next;247 it = &next( *it ); 248 248 } 249 249 } … … 258 258 #if defined(CFA_HAVE_LINUX_IO_URING_H) 259 259 static void assign_io(io_context$ ** data, size_t count, dlist(struct processor) & list) { 260 struct processor * it = & list`first;260 struct processor * it = &first( list ); 261 261 while(it) { 262 262 /* paranoid */ verifyf( it, "Unexpected null iterator\n"); 263 263 /* paranoid */ verifyf( it->io.ctx->cq.id < count, "Processor %p has id %u above count %zu\n", it, it->rdq.id, count); 264 264 data[it->io.ctx->cq.id] = it->io.ctx; 265 it = & (*it)`next;265 it = &next( *it ); 266 266 } 267 267 } -
libcfa/src/concurrency/kernel/private.hfa
rb195498 r6b33e89 10 10 // Created On : Mon Feb 13 12:27:26 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Thu Mar 2 16:04:46 202313 // Update Count : 1 112 // Last Modified On : Mon Apr 21 18:08:48 2025 13 // Update Count : 12 14 14 // 15 15 … … 287 287 static inline [unsigned, uint_fast32_t] ready_mutate_register() { 288 288 unsigned id = register_proc_id(); 289 uint_fast32_t last = ready_mutate_lock(); 290 return [id, last]; 289 return [id, ready_mutate_lock()]; 291 290 } 292 291 -
libcfa/src/concurrency/kernel/startup.cfa
rb195498 r6b33e89 69 69 //----------------------------------------------------------------------------- 70 70 // Start and stop routine for the kernel, declared first to make sure they run first 71 static void __kernel_startup 72 static void __kernel_shutdown(void) __attribute__(( destructor 71 static void __kernel_startup(void) __attribute__(( constructor( STARTUP_PRIORITY_KERNEL ) )); 72 static void __kernel_shutdown(void) __attribute__(( destructor( STARTUP_PRIORITY_KERNEL ) )); 73 73 74 74 //----------------------------------------------------------------------------- … … 78 78 static void * __invoke_processor(void * arg); 79 79 static void __kernel_first_resume( processor * this ); 80 static void __kernel_last_resume 80 static void __kernel_last_resume( processor * this ); 81 81 static void init(processor & this, const char name[], cluster & _cltr, thread$ * initT); 82 82 static void deinit(processor & this); … … 99 99 extern void __kernel_alarm_shutdown(void); 100 100 extern void __cfa_io_start( processor * ); 101 extern void __cfa_io_stop 101 extern void __cfa_io_stop( processor * ); 102 102 103 103 //----------------------------------------------------------------------------- … … 110 110 //----------------------------------------------------------------------------- 111 111 // Kernel storage 112 KERNEL_STORAGE(cluster, 113 KERNEL_STORAGE(processor, 114 KERNEL_STORAGE(thread$, 115 KERNEL_STORAGE(__stack_t, 112 KERNEL_STORAGE(cluster, mainCluster); 113 KERNEL_STORAGE(processor, mainProcessor); 114 KERNEL_STORAGE(thread$, mainThread); 115 KERNEL_STORAGE(__stack_t, mainThreadCtx); 116 116 #if !defined(__CFA_NO_STATISTICS__) 117 117 KERNEL_STORAGE(__stats_t, mainProcStats); 118 118 #endif 119 119 120 cluster 121 processor 122 thread$ 120 cluster * mainCluster libcfa_public; 121 processor * mainProcessor; 122 thread$ * mainThread; 123 123 124 124 extern "C" { … … 150 150 // Struct to steal stack 151 151 struct current_stack_info_t { 152 __stack_t * storage; 153 void * base; 154 void * limit; 155 void * context; 152 __stack_t * storage; // pointer to stack object 153 void * base; // base of stack 154 void * limit; // stack grows towards stack limit 155 void * context; // address of cfa_context_t 156 156 }; 157 157 … … 234 234 //initialize the global state variables 235 235 __cfaabi_tls.this_processor = mainProcessor; 236 __cfaabi_tls.this_thread 236 __cfaabi_tls.this_thread = mainThread; 237 237 238 238 #if !defined( __CFA_NO_STATISTICS__ ) … … 355 355 processor * proc = (processor *) arg; 356 356 __cfaabi_tls.this_processor = proc; 357 __cfaabi_tls.this_thread 357 __cfaabi_tls.this_thread = 0p; 358 358 __cfaabi_tls.preemption_state.[enabled, disable_count] = [false, 1]; 359 359 proc->local_data = &__cfaabi_tls; … … 477 477 stack.storage = info->storage; 478 478 with(*stack.storage) { 479 limit 480 base 479 limit = info->limit; 480 base = info->base; 481 481 } 482 482 __attribute__((may_alias)) intptr_t * istorage = (intptr_t*) &stack.storage; … … 485 485 state = Start; 486 486 starter = 0p; 487 last = 0p;487 this.last = 0p; 488 488 cancellation = 0p; 489 490 491 489 ehm_state.ehm_buffer{}; 490 ehm_state.buffer_lock{}; 491 ehm_state.ehm_enabled = false; 492 492 } 493 493 … … 502 502 self_mon_p = &self_mon; 503 503 rdy_link.next = 0p; 504 rdy_link.ts 504 rdy_link.ts = MAX; 505 505 user_link.next = 0p; 506 506 user_link.prev = 0p; … … 509 509 preferred = ready_queue_new_preferred(); 510 510 last_proc = 0p; 511 PRNG_SET_SEED( random_state, 511 PRNG_SET_SEED( random_state, __global_random_mask ? __global_random_prime : __global_random_prime ^ rdtscl() ); 512 512 #if defined( __CFA_WITH_VERIFY__ ) 513 513 executing = 0p; … … 531 531 this.name = name; 532 532 this.cltr = &_cltr; 533 533 __atomic_add_fetch( &_cltr.procs.constructed, 1u, __ATOMIC_RELAXED ); 534 534 this.rdq.its = 0; 535 535 this.rdq.itr = 0; 536 this.rdq.id 536 this.rdq.id = 0; 537 537 this.rdq.target = MAX; 538 538 this.rdq.last = MAX; … … 545 545 this.io.ctx = 0p; 546 546 this.io.pending = false; 547 this.io.dirty 547 this.io.dirty = false; 548 548 549 549 this.init.thrd = initT; … … 599 599 __cfadbg_print_safe(runtime_core, "Kernel : core %p signaling termination\n", &this); 600 600 601 601 __atomic_sub_fetch( &this.cltr->procs.constructed, 1u, __ATOMIC_RELAXED ); 602 602 603 603 __atomic_store_n(&do_terminate, true, __ATOMIC_RELAXED); … … 619 619 // Cluster 620 620 static void ?{}(__cluster_proc_list & this) { 621 this.fdw 622 this.idle 623 621 this.fdw = 0p; 622 this.idle = 0; 623 this.constructed = 0; 624 624 this.total = 0; 625 625 } … … 706 706 //----------------------------------------------------------------------------- 707 707 // Global Queues 708 static void doregister( cluster 709 lock 708 static void doregister( cluster & cltr ) { 709 lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2); 710 710 push_front( __cfa_dbg_global_clusters.list, cltr ); 711 unlock 712 } 713 714 static void unregister( cluster 715 lock 711 unlock( __cfa_dbg_global_clusters.lock ); 712 } 713 714 static void unregister( cluster & cltr ) { 715 lock( __cfa_dbg_global_clusters.lock __cfaabi_dbg_ctx2); 716 716 remove( __cfa_dbg_global_clusters.list, cltr ); 717 717 unlock( __cfa_dbg_global_clusters.lock ); … … 719 719 720 720 void doregister( cluster * cltr, thread$ & thrd ) { 721 lock 721 lock(cltr->thread_list_lock __cfaabi_dbg_ctx2); 722 722 cltr->nthreads += 1; 723 723 insert_first(cltr->threads, thrd); 724 unlock 724 unlock(cltr->thread_list_lock); 725 725 } 726 726 727 727 void unregister( cluster * cltr, thread$ & thrd ) { 728 lock 728 lock(cltr->thread_list_lock __cfaabi_dbg_ctx2); 729 729 { 730 730 tytagref( dlink(thread$), dlink(thread$) ) ?`inner( thread$ & this ) = void;
Note:
See TracChangeset
for help on using the changeset viewer.