Changeset 9b1dcc2
- Timestamp: Jun 12, 2020, 1:49:17 PM (3 years ago)
- Branches: ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children: cb196f2
- Parents: b388ee81
- Location: libcfa/src/concurrency
- Files: 6 edited
Legend: lines prefixed with - were removed, lines prefixed with + were added, and unprefixed lines are unchanged context.
libcfa/src/concurrency/kernel.cfa
rb388ee81 → r9b1dcc2

@@ 300 @@
     // register the processor unless it's the main thread which is handled in the boot sequence
     if(this != mainProcessor) {
-        this->id = doregister( this );
+        this->id = doregister( (__processor_id_t*)this );
         ready_queue_grow( this->cltr );
     }

@@ 346 @@
     if(this != mainProcessor) {
         ready_queue_shrink( this->cltr );
-        unregister( this );
+        unregister( (__processor_id_t*)this );
     }
     else {

@@ 416 @@
     if(unlikely(thrd_dst->preempted != __NO_PREEMPTION)) {
         // The thread was preempted, reschedule it and reset the flag
-        __schedule_thread( thrd_dst );
+        __schedule_thread( (__processor_id_t*)this, thrd_dst );
         break RUNNING;
     }

@@ 609 @@
 // Scheduler routines
 // KERNEL ONLY
-void __schedule_thread( $thread * thrd ) {
+void __schedule_thread( struct __processor_id_t * id, $thread * thrd ) {
     /* paranoid */ verify( thrd );
     /* paranoid */ verify( thrd->state != Halted );

@@ 623 @@
     if (thrd->preempted == __NO_PREEMPTION) thrd->state = Ready;

-    ready_schedule_lock  ( kernelTLS.this_processor );
+    ready_schedule_lock  ( id );
     push( thrd->curr_cluster, thrd );

     __wake_one(thrd->curr_cluster);
-    ready_schedule_unlock( kernelTLS.this_processor );
+    ready_schedule_unlock( id );

     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ 636 @@
     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

-    ready_schedule_lock  ( kernelTLS.this_processor );
+    ready_schedule_lock  ( (__processor_id_t*)kernelTLS.this_processor );
     $thread * head = pop( this );
-    ready_schedule_unlock( kernelTLS.this_processor );
+    ready_schedule_unlock( (__processor_id_t*)kernelTLS.this_processor );

     /* paranoid */ verify( ! kernelTLS.preemption_state.enabled );

@@ 645 @@
 // KERNEL ONLY unpark with out disabling interrupts
-void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 ) {
+void __unpark( struct __processor_id_t * id, $thread * thrd __cfaabi_dbg_ctx_param2 ) {
     static_assert(sizeof(thrd->state) == sizeof(int));

@@ 663 @@
             // Wake lost the race,
             thrd->state = Blocked;
-            __schedule_thread( thrd );
+            __schedule_thread( id, thrd );
             break;
         case Rerun:

@@ 681 @@
     disable_interrupts();
-    __unpark( thrd __cfaabi_dbg_ctx_fwd2 );
+    __unpark( (__processor_id_t*)kernelTLS.this_processor, thrd __cfaabi_dbg_ctx_fwd2 );
     enable_interrupts( __cfaabi_dbg_ctx );
 }

@@ 798 @@
     (*mainProcessor){};

-    mainProcessor->id = doregister( mainProcessor );
+    mainProcessor->id = doregister( (__processor_id_t*)mainProcessor );

     //initialize the global state variables

@@ 809 @@
     // Add the main thread to the ready queue
     // once resume is called on mainProcessor->runner the mainThread needs to be scheduled like any normal thread
-    __schedule_thread( mainThread );
+    __schedule_thread( (__processor_id_t *)mainProcessor, mainThread );

     // SKULLDUGGERY: Force a context switch to the main processor to set the main thread's context to the current UNIX

@@ 853 @@
     kernel_stop_preemption();

-    unregister( mainProcessor );
+    unregister( (__processor_id_t*)mainProcessor );

     // Destroy the main processor and its context in reverse order of construction
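The change here is mostly mechanical: __schedule_thread and __unpark stop reading the scheduling identity out of kernelTLS.this_processor and instead take it as an explicit struct __processor_id_t * argument, which processor call sites satisfy by casting themselves. That is what lets callers that are not processors (the preemption alarm thread in preemption.cfa below) enter the scheduler. A minimal plain-C sketch of the same refactoring pattern; the names (sched_ctx_t, schedule_old, schedule_new, tls_self) are hypothetical and only illustrate the TLS-to-parameter move:

    #include <stdio.h>

    /* Hypothetical scheduling identity, standing in for __processor_id_t. */
    typedef struct { unsigned id; } sched_ctx_t;

    static _Thread_local sched_ctx_t * tls_self;   /* analogue of kernelTLS.this_processor */

    /* Before: the identity is pulled out of thread-local state, so only threads
     * that actually have a registered "self" can schedule. */
    static void schedule_old(int thrd) {
            printf("scheduled %d via slot %u\n", thrd, tls_self->id);
    }

    /* After: the identity is an explicit parameter, so any caller that has
     * registered some identity (its own processor, or a temporary one) can pass it. */
    static void schedule_new(sched_ctx_t * id, int thrd) {
            printf("scheduled %d via slot %u\n", thrd, id->id);
    }

    int main(void) {
            sched_ctx_t self = { .id = 0 };
            tls_self = &self;
            schedule_old(42);                      /* works only once TLS is set up */

            sched_ctx_t helper = { .id = 1 };      /* e.g. an alarm thread's identity */
            schedule_new(&helper, 42);             /* no TLS requirement */
            return 0;
    }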
libcfa/src/concurrency/kernel.hfa
rb388ee81 → r9b1dcc2

@@ 47 @@
 extern struct cluster * mainCluster;

-// Processor
+// Processor id, required for scheduling threads
+struct __processor_id_t {
+    unsigned id;
+};
+
 coroutine processorCtx_t {
     struct processor * proc;

@@ 54 @@
 // Wrapper around kernel threads
 struct processor {
+    inline __processor_id_t;
+
     // Main state
     // Coroutine ctx who does keeps the state of the processor

@@ 60 @@
     // Cluster from which to get threads
     struct cluster * cltr;
-    unsigned int id;

     // Name of the processor
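processor now embeds __processor_id_t via "inline __processor_id_t;", Cforall's nominal-inheritance member, replacing the old "unsigned int id" field. My understanding is that the inlined base sits at the start of the derived object, which is what makes the (__processor_id_t*)this casts in kernel.cfa above well defined. A rough plain-C equivalent of that layout (the names are stand-ins, not the real declarations):

    #include <stddef.h>
    #include <assert.h>

    /* Stand-in for __processor_id_t: just the slot index into the scheduler lock. */
    struct processor_id { unsigned id; };

    /* Stand-in for processor: the id is embedded as the first member, mirroring
     * the effect of `inline __processor_id_t;` in the Cforall struct. */
    struct processor {
            struct processor_id base;   /* must come first for the cast to be valid */
            const char * name;
            /* ... coroutine context, cluster pointer, etc. ... */
    };

    int main(void) {
            struct processor p = { .base = { .id = 7 }, .name = "main" };

            /* A pointer to the whole processor can be treated as a pointer to its
             * leading processor_id, which is what doregister()/unregister()
             * and ready_schedule_lock() need. */
            struct processor_id * idp = (struct processor_id *)&p;
            assert(offsetof(struct processor, base) == 0);
            assert(idp->id == 7);
            return 0;
    }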
libcfa/src/concurrency/kernel_private.hfa
rb388ee81 → r9b1dcc2

@@ 25 @@
 // Scheduler

+struct __attribute__((aligned(64))) __scheduler_lock_id_t;
+
 extern "C" {
     void disable_interrupts() OPTIONAL_THREAD;

@@ 31 @@
 }

-void __schedule_thread( $thread * ) __attribute__((nonnull (1)));
+void __schedule_thread( struct __processor_id_t *, $thread * ) __attribute__((nonnull (2)));

 //Block current thread and release/wake-up the following resources

@@ 73 @@
 // KERNEL ONLY unpark with out disabling interrupts
-void __unpark( $thread * thrd __cfaabi_dbg_ctx_param2 );
+void __unpark( struct __processor_id_t *, $thread * thrd __cfaabi_dbg_ctx_param2 );

 //-----------------------------------------------------------------------------

@@ 108 @@
 // Cells use by the reader writer lock
 // while not generic it only relies on a opaque pointer
-struct __attribute__((aligned(64))) __processor_id {
-    processor * volatile handle;
+struct __attribute__((aligned(64))) __scheduler_lock_id_t {
+    __processor_id_t * volatile handle;
     volatile bool lock;
 };

@@ 115 @@
 // Lock-Free registering/unregistering of threads
 // Register a processor to a given cluster and get its unique id in return
-unsigned doregister( struct processor * proc );
+unsigned doregister( struct __processor_id_t * proc );

 // Unregister a processor from a given cluster using its id, getting back the original pointer
-void unregister( struct processor * proc );
+void unregister( struct __processor_id_t * proc );

 //=======================================================================

@@ 167 @@
     // data pointer
-    __processor_id * data;
+    __scheduler_lock_id_t * data;

@@ 178 @@
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock( struct processor * proc ) with(*__scheduler_lock) {
+static inline void ready_schedule_lock( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     unsigned iproc = proc->id;
     /*paranoid*/ verify(data[iproc].handle == proc);

@@ 197 @@
 }

-static inline void ready_schedule_unlock( struct processor * proc ) with(*__scheduler_lock) {
+static inline void ready_schedule_unlock( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     unsigned iproc = proc->id;
     /*paranoid*/ verify(data[iproc].handle == proc);
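The reader-writer lock's cells are renamed from __processor_id to __scheduler_lock_id_t and now hold a __processor_id_t * handle, while the reader side (ready_schedule_lock/ready_schedule_unlock) keeps indexing its own cell with proc->id. A simplified plain-C model of that per-slot reader acquire/release; the real lock also coordinates with a writer path that is not part of this hunk, so treat this as a sketch only:

    #include <stdbool.h>
    #include <assert.h>

    #define MAX_SLOTS 128

    struct processor_id { unsigned id; };

    /* One cache-line-aligned cell per registered identity, like __scheduler_lock_id_t. */
    struct lock_cell {
            struct processor_id * volatile handle;  /* who owns this slot */
            volatile bool lock;                     /* slot is "reading" the scheduler state */
    } __attribute__((aligned(64)));

    static struct lock_cell data[MAX_SLOTS];

    static void ready_schedule_lock(struct processor_id * proc) {
            unsigned i = proc->id;
            assert(data[i].handle == proc);          /* the paranoid check from the header */
            __atomic_store_n(&data[i].lock, true, __ATOMIC_SEQ_CST);
            /* the real implementation additionally waits for any writer here */
    }

    static void ready_schedule_unlock(struct processor_id * proc) {
            unsigned i = proc->id;
            assert(data[i].handle == proc);
            __atomic_store_n(&data[i].lock, false, __ATOMIC_RELEASE);
    }

    int main(void) {
            struct processor_id me = { .id = 3 };
            data[me.id].handle = &me;                /* normally done by doregister() */
            ready_schedule_lock(&me);
            /* ... push/pop the ready queue ... */
            ready_schedule_unlock(&me);
            return 0;
    }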
libcfa/src/concurrency/preemption.cfa
rb388ee81 → r9b1dcc2

@@ 39 @@
 // FwdDeclarations : timeout handlers
 static void preempt( processor * this );
-static void timeout( $thread * this );
+static void timeout( struct __processor_id_t * id, $thread * this );

 // FwdDeclarations : Signal handlers

@@ 90 @@
 // Tick one frame of the Discrete Event Simulation for alarms
-static void tick_preemption( ) {
+static void tick_preemption( struct __processor_id_t * id ) {
     alarm_node_t * node = 0p;                      // Used in the while loop but cannot be declared in the while condition
     alarm_list_t * alarms = &event_kernel->alarms; // Local copy for ease of reading

@@ 108 @@
     }
     else {
-        timeout( node->thrd );
+        timeout( id, node->thrd );
     }

@@ 268 @@
 // reserved for future use
-static void timeout( $thread * this ) {
-    __unpark( this __cfaabi_dbg_ctx2 );
+static void timeout( struct __processor_id_t * id, $thread * this ) {
+    __unpark( id, this __cfaabi_dbg_ctx2 );
 }

@@ 405 @@
 // Waits on SIGALRM and send SIGUSR1 to whom ever needs it
 static void * alarm_loop( __attribute__((unused)) void * args ) {
+    __processor_id_t id;
+    id.id = doregister(&id);
+
     // Block sigalrms to control when they arrive
     sigset_t mask;

@@ 449 @@
             // __cfaabi_dbg_print_safe( "Kernel : Preemption thread tick\n" );
             lock( event_kernel->lock __cfaabi_dbg_ctx2 );
-            tick_preemption( );
+            tick_preemption( &id );
             unlock( event_kernel->lock );
             break;

@@ 462 @@
 EXIT:
     __cfaabi_dbg_print_safe( "Kernel : Preemption thread stopping\n" );
+    unregister(&id);
     return 0p;
 }
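The alarm thread is exactly the kind of non-processor caller the new parameter exists for: alarm_loop registers a stack-local __processor_id_t when it starts, threads it through tick_preemption → timeout → __unpark for every wake-up it performs, and unregisters it before exiting. A plain-C sketch of that register/use/unregister lifetime, with hypothetical *_stub stand-ins for the real routines:

    #include <stdio.h>

    struct processor_id { unsigned id; };

    /* Hypothetical stand-ins for doregister()/unregister(): hand out slot numbers. */
    static unsigned next_slot = 0;
    static unsigned doregister_stub(struct processor_id * id) { (void)id; return next_slot++; }
    static void unregister_stub(struct processor_id * id) { (void)id; }

    /* Every wake-up performed on behalf of the alarm thread carries its identity. */
    static void timeout_stub(struct processor_id * id, int thrd) {
            printf("unparking thread %d on behalf of slot %u\n", thrd, id->id);
    }

    static void tick_preemption_stub(struct processor_id * id) {
            /* walk the expired alarms; thread alarms are woken with the caller's id */
            timeout_stub(id, 42);
    }

    /* Shape of alarm_loop() after this change: the identity lives on the loop's
     * stack and stays registered for as long as the loop can schedule threads. */
    static void * alarm_loop_stub(void * args) {
            (void)args;
            struct processor_id id;
            id.id = doregister_stub(&id);      /* acquire a scheduler-lock slot */

            for (int tick = 0; tick < 3; tick++) {
                    tick_preemption_stub(&id); /* every tick schedules with this id */
            }

            unregister_stub(&id);              /* give the slot back before exiting */
            return 0;
    }

    int main(void) { alarm_loop_stub(0); return 0; }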
libcfa/src/concurrency/ready_queue.cfa
rb388ee81 → r9b1dcc2

@@ 74 @@
 }

-void ?{}( __processor_id & this, struct processor * proc ) {
+void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
     this.handle = proc;
     this.lock   = false;

@@ 81 @@
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned doregister( struct processor * proc ) with(*__scheduler_lock) {
+unsigned doregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     __cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);

@@ 89 @@
     // Check among all the ready
     for(uint_fast32_t i = 0; i < s; i++) {
-        processor * null = 0p; // Re-write every loop since compare thrashes it
+        __processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
         if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
             && __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {

@@ 106 @@
     // Step - 3 : Mark space as used and then publish it.
-    __processor_id * storage = (__processor_id*)&data[n];
+    __scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
     (*storage){ proc };
     while(true) {

@@ 125 @@
 }

-void unregister( struct processor * proc ) with(*__scheduler_lock) {
+void unregister( struct __processor_id_t * proc ) with(*__scheduler_lock) {
     unsigned id = proc->id;
     /*paranoid*/ verify(id < ready);

@@ 188 @@
     } before, after;

-    #if defined(__CFA_WITH_VERIFY__)
-        // id of last processor to acquire the lock
-        // needed only to check for mutual exclusion violations
-        unsigned int last_id;
-
-        // number of items on this list
-        // needed only to check for deadlocks
-        unsigned int count;
-    #endif
-
     // Optional statistic counters
     #if !defined(__CFA_NO_SCHED_STATS__)

@@ 235 @@
 void ?{}( __intrusive_lane_t & this ) {
     this.lock = false;
-    #if defined(__CFA_WITH_VERIFY__)
-        this.last_id = -1u;
-        this.count = 0u;
-    #endif

     this.before.link.prev = 0p;

@@ 279 @@
     /* paranoid */ verify(tail(this)->link.next == 0p );
     /* paranoid */ verify(tail(this)->link.prev == head(this) );
-    /* paranoid */ verify(this.count == 0u );

@@ 293 @@
     /* paranoid */ verify(head(this)->link.prev == 0p);

-    this.count++;
-
     if(this.before.link.ts == 0l) {
         /* paranoid */ verify(tail(this)->link.prev == head(this));

@@ 346 @@
     $thread * next = node->link.next;

-    #if defined(__CFA_WITH_VERIFY__)
-        this.count--;
-        /* paranoid */ verify(node != tail);
-        /* paranoid */ verify(node);
-    #endif
+    /* paranoid */ verify(node != tail);
+    /* paranoid */ verify(node);

     // Do the pop

@@ 637 @@
     } while( !__atomic_try_acquire( &lanes.data[i].lock ) );

-    #if defined(__CFA_WITH_VERIFY__)
-        /* paranoid */ verify(lanes.data[i].last_id == -1u);
-        /* paranoid */ lanes.data[i].last_id = kernelTLS.this_processor->id;
-    #endif
-
     bool first = false;

@@ 655 @@
         arrive( snzi, i );
     }

-    #if defined(__CFA_WITH_VERIFY__)
-        /* paranoid */ verifyf( lanes.data[i].last_id == kernelTLS.this_processor->id, "Expected last processor to lock queue %u to be %u, was %u\n", i, lanes.data[i].last_id, kernelTLS.this_processor->id );
-        /* paranoid */ verifyf( lanes.data[i].lock, "List %u is not locked\n", i );
-        /* paranoid */ lanes.data[i].last_id = -1u;
-    #endif

     // Unlock and return

@@ 698 @@
     if( !__atomic_try_acquire(&lane.lock) ) return 0p;

-    #if defined(__CFA_WITH_VERIFY__)
-        /* paranoid */ verify(lane.last_id == -1u);
-        /* paranoid */ lane.last_id = kernelTLS.this_processor->id;
-    #endif
-

     // If list is empty, unlock and retry
     if( is_empty(lane) ) {
-        #if defined(__CFA_WITH_VERIFY__)
-            /* paranoid */ verify(lane.last_id == kernelTLS.this_processor->id);
-            /* paranoid */ lane.last_id = -1u;
-        #endif
-
         __atomic_unlock(&lane.lock);
         return 0p;

@@ 721 @@
     /* paranoid */ verify(thrd);
-    /* paranoid */ verify(lane.last_id == kernelTLS.this_processor->id);
     /* paranoid */ verify(lane.lock);

@@ 728 @@
         depart( snzi, w );
     }

-    #if defined(__CFA_WITH_VERIFY__)
-        /* paranoid */ verify(lane.last_id == kernelTLS.this_processor->id);
-        /* paranoid */ lane.last_id = -1u;
-    #endif

     // Unlock and return

@@ 874 @@
     ^(snzi){};

-    // Make sure that the total thread count stays the same
-    #if defined(__CFA_WITH_VERIFY__)
-        size_t nthreads = 0;
-        for( idx; (size_t)lanes.count ) {
-            nthreads += lanes.data[idx].count;
-        }
-    #endif
-
     size_t ocount = lanes.count;
     // Check that we have some space left

@@ 940 @@
         }
     }
-
-    // Make sure that the total thread count stayed the same
-    #if defined(__CFA_WITH_VERIFY__)
-        for( idx; (size_t)lanes.count ) {
-            nthreads -= lanes.data[idx].count;
-        }
-        verifyf(nthreads == 0, "Shrinking changed number of threads");
-    #endif
 }
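The registration fast path is visible in the first hunks above: scan the handle array for a null entry and claim it with a compare-and-swap, publishing the slot index through proc->id; unregistering stores the null back. A self-contained plain-C sketch of that claim/release protocol for a fixed-size table (the grow-and-publish path of steps 2 and 3 is omitted, and the names are stand-ins):

    #include <stdio.h>

    #define MAX_SLOTS 8

    struct processor_id { unsigned id; };

    /* Fixed-size table of handles; a NULL entry is a free slot. */
    static struct processor_id * volatile handles[MAX_SLOTS];

    /* Claim a free slot lock-free: CAS NULL -> proc. Returns the slot index,
     * or MAX_SLOTS if the table is full (the real code grows the table instead). */
    static unsigned doregister_sketch(struct processor_id * proc) {
            for (unsigned i = 0; i < MAX_SLOTS; i++) {
                    struct processor_id * null = 0;   /* re-read every loop: CAS clobbers it */
                    if (__atomic_load_n(&handles[i], __ATOMIC_RELAXED) == null
                        && __atomic_compare_exchange_n(&handles[i], &null, proc,
                                                       false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
                            proc->id = i;
                            return i;
                    }
            }
            return MAX_SLOTS;
    }

    /* Release: publish NULL so the slot can be reused by a later registration. */
    static void unregister_sketch(struct processor_id * proc) {
            __atomic_store_n(&handles[proc->id], 0, __ATOMIC_RELEASE);
    }

    int main(void) {
            struct processor_id a, b;
            printf("a got slot %u\n", doregister_sketch(&a));
            printf("b got slot %u\n", doregister_sketch(&b));
            unregister_sketch(&a);
            printf("a's slot is free again: %p\n", (void *)handles[a.id]);
            return 0;
    }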
libcfa/src/concurrency/thread.cfa
rb388ee81 → r9b1dcc2

@@ 62 @@
     verify( this_thrd->context.SP );

-    __schedule_thread( this_thrd );
+    __schedule_thread( (__processor_id_t *)kernelTLS.this_processor, this_thrd );
     enable_interrupts( __cfaabi_dbg_ctx );
 }