Changeset eba74ba for src/libcfa/concurrency
- Timestamp: May 25, 2018, 2:51:06 PM
- Branches: new-env, with_gc
- Children: cdc4d43
- Parents: 3ef35bd, 58e822a
Note: this is a merge changeset; the changes displayed below correspond to the merge itself, not to the full set of changes relative to either parent.
- Location: src/libcfa/concurrency
- Files: 6 edited
src/libcfa/concurrency/alarm.c
(r3ef35bd → reba74ba)

  // Created On       : Fri Jun  2 11:31:25 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Mon Apr  9 13:36:18 2018
- // Update Count     : 61
+ // Last Modified On : Fri May 25 06:25:47 2018
+ // Update Count     : 67
  //
…
  void __kernel_set_timer( Duration alarm ) {
+     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
      setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
  }
…
  }

- __cfaabi_dbg_debug_do( bool validate( alarm_list_t * this ) {
+ #if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
+ bool validate( alarm_list_t * this ) {
      alarm_node_t ** it = &this->head;
      while( (*it) ) {
…
      return it == this->tail;
- })
+ }
+ #endif

  static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) {
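The new verifyf guard in __kernel_set_timer addresses a genuine setitimer pitfall: the syscall has only microsecond resolution, so a non-zero Duration below 1us truncates to a zero itimerval, and POSIX defines a zero it_value as disarming the timer, which silently drops the expected wakeup (alarm == 0 stays legal as the explicit cancel). The #if guard likewise replaces the __cfaabi_dbg_debug_do wrapper so validate() is compiled under __CFA_VERIFY__ builds as well as debug ones. A minimal plain-C illustration of the truncation hazard (not CFA code; the 800ns value is made up):

#include <stdio.h>
#include <sys/time.h>

int main(void) {
    long delay_ns = 800;   // caller asks for a non-zero ~800ns timeout
    struct itimerval t = {
        .it_value = { .tv_sec = 0, .tv_usec = delay_ns / 1000 }   // truncates to 0
    };
    printf("requested %ldns, armed %ldus\n", delay_ns, (long) t.it_value.tv_usec);
    // POSIX: a zero it_value DISARMS the timer, so instead of a prompt
    // SIGALRM this call silently cancels any pending alarm.
    setitimer(ITIMER_REAL, &t, NULL);
    return 0;
}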
src/libcfa/concurrency/kernel
(r3ef35bd → reba74ba)

      __dllist_t(struct processor) idles;

+     // List of processors
+     __spinlock_t thread_list_lock;
+     __dllist_t(struct thread_desc) threads;
+
      // Link lists fields
      struct {
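The cluster now owns the list of its threads, guarded by a dedicated thread_list_lock alongside the existing procs/idles lists. __dllist_t is intrusive: the link fields live inside thread_desc itself (the node.next/node.prev assignments visible in kernel.c and thread.c below), so registering a thread costs no allocation and removing it is O(1) given only the element. A rough plain-C sketch of that intrusive shape (hypothetical names, not the CFA implementation):

struct thread_desc;

struct dlink {
    struct thread_desc * next;
    struct thread_desc * prev;
};

struct thread_desc {
    struct dlink node;           // embedded links, like thread_desc::node in CFA
    /* ... rest of the thread state ... */
};

struct thread_list {
    struct thread_desc * head;   // NULL when empty
};

// O(1) insertion at the head; no allocation, the node lives in the thread.
static void list_push(struct thread_list * l, struct thread_desc * t) {
    t->node.next = l->head;
    t->node.prev = NULL;
    if (l->head) l->head->node.prev = t;
    l->head = t;
}

// O(1) removal from anywhere in the list, given only the element.
static void list_remove(struct thread_list * l, struct thread_desc * t) {
    if (t->node.prev) t->node.prev->node.next = t->node.next;
    else              l->head                 = t->node.next;
    if (t->node.next) t->node.next->node.prev = t->node.prev;
    t->node.next = t->node.prev = NULL;
}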
src/libcfa/concurrency/kernel.c
(r3ef35bd → reba74ba)

  thread_desc * mainThread;

- struct { __dllist_t(thread_desc) list; __spinlock_t lock; } global_threads;
  struct { __dllist_t(cluster    ) list; __spinlock_t lock; } global_clusters;

  //-----------------------------------------------------------------------------
  // Global state
-
- // volatile thread_local bool preemption_in_progress = 0;
- // volatile thread_local bool preemption_enabled = false;
- // volatile thread_local unsigned short disable_preempt_count = 1;
-
  thread_local struct KernelThreadData kernelTLS = {
      NULL,
…
      node.next = NULL;
      node.prev = NULL;
-     doregister( this);
+     doregister(curr_cluster, this);

      monitors{ &self_mon_p, 1, (fptr_t)0 };
…
      procs{ __get };
      idles{ __get };
+     threads{ __get };

      doregister(this);
…
      __cfaabi_dbg_print_safe("Kernel : Starting\n");

-     global_threads.list{ __get };
-     global_threads.lock{};
      global_clusters.list{ __get };
      global_clusters.lock{};
…
      ^(mainThread){};

+     ^(global_clusters.list){};
+     ^(global_clusters.lock){};
+
      __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
  }
…
      else {
          int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
+         __cfaabi_dbg_bits_write( abort_text, len );
      }
  }
…
  //-----------------------------------------------------------------------------
  // Global Queues
- void doregister( thread_desc & thrd ) {
-     // lock   ( global_thread.lock );
-     // push_front( global_thread.list, thrd );
-     // unlock ( global_thread.lock );
- }
-
- void unregister( thread_desc & thrd ) {
-     // lock  ( global_thread.lock );
-     // remove( global_thread.list, thrd );
-     // unlock( global_thread.lock );
- }
-
  void doregister( cluster & cltr ) {
-     // lock   ( global_cluster.lock );
-     // push_front( global_cluster.list, cltr );
-     // unlock ( global_cluster.lock );
+     lock      ( global_clusters.lock __cfaabi_dbg_ctx2);
+     push_front( global_clusters.list, cltr );
+     unlock    ( global_clusters.lock );
  }

  void unregister( cluster & cltr ) {
-     // lock  ( global_cluster.lock );
-     // remove( global_cluster.list, cltr );
-     // unlock( global_cluster.lock );
+     lock  ( global_clusters.lock __cfaabi_dbg_ctx2);
+     remove( global_clusters.list, cltr );
+     unlock( global_clusters.lock );
+ }
+
+ void doregister( cluster * cltr, thread_desc & thrd ) {
+     lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
+     push_front(cltr->threads, thrd);
+     unlock    (cltr->thread_list_lock);
+ }
+
+ void unregister( cluster * cltr, thread_desc & thrd ) {
+     lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
+     remove(cltr->threads, thrd );
+     unlock(cltr->thread_list_lock);
  }

  void doregister( cluster * cltr, processor * proc ) {
-     //lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
-     //push_front(cltr->procs, *proc);
-     //unlock    (cltr->proc_list_lock);
+     lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
+     push_front(cltr->procs, *proc);
+     unlock    (cltr->proc_list_lock);
  }

  void unregister( cluster * cltr, processor * proc ) {
-     //lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
-     //remove(cltr->procs, *proc );
-     //unlock(cltr->proc_list_lock);
+     lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
+     remove(cltr->procs, *proc );
+     unlock(cltr->proc_list_lock);
  }
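The registration helpers above are now live code instead of commented-out stubs, and each one holds the owning lock for only the few instructions of the list splice, never across allocation or scheduling. A self-contained C11 sketch of the same lock/splice/unlock shape, with atomic_flag standing in for CFA's __spinlock_t and a counter standing in for the list (all names hypothetical):

#include <stdatomic.h>

// Stand-in for CFA's __spinlock_t: a C11 test-and-set spinlock.
typedef struct { atomic_flag taken; } spinlock;   // init with ATOMIC_FLAG_INIT

static void spin_lock(spinlock * l) {
    // Acquire ordering: the critical section cannot float above the lock.
    while (atomic_flag_test_and_set_explicit(&l->taken, memory_order_acquire)) {
        /* spin */
    }
}

static void spin_unlock(spinlock * l) {
    // Release ordering: the critical section cannot float below the unlock.
    atomic_flag_clear_explicit(&l->taken, memory_order_release);
}

struct cluster {
    spinlock thread_list_lock;
    int      nthreads;            // placeholder for the intrusive thread list
};

// Shape of doregister(cluster *, thread_desc &): lock, O(1) update, unlock.
void doregister_sketch(struct cluster * cltr) {
    spin_lock(&cltr->thread_list_lock);
    cltr->nthreads++;             // real code: push_front(cltr->threads, thrd)
    spin_unlock(&cltr->thread_list_lock);
}

Keeping the critical section this small is what makes a spinlock, rather than a blocking mutex, the right tool for these lists.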
src/libcfa/concurrency/kernel_private.h
(r3ef35bd → reba74ba)

- void doregister( struct thread_desc & thrd );
- void unregister( struct thread_desc & thrd );
+ void doregister( struct cluster & cltr );
+ void unregister( struct cluster & cltr );

- void doregister( struct cluster & cltr );
- void unregister( struct cluster & cltr );
+ void doregister( struct cluster * cltr, struct thread_desc & thrd );
+ void unregister( struct cluster * cltr, struct thread_desc & thrd );

  void doregister( struct cluster * cltr, struct processor * proc );
src/libcfa/concurrency/preemption.c
(r3ef35bd → reba74ba)

  #include "preemption.h"
+ #include <assert.h>

  extern "C" {
…
      //Loop throught every thing expired
      while( node = get_expired( alarms, currtime ) ) {
+         // __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" );

          // Check if this is a kernel
…
          Duration period = node->period;
          if( period > 0 ) {
+             // __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv );
              node->alarm = currtime + period; // Alarm is periodic, add currtime to it (used cached current time)
              insert( alarms, node );          // Reinsert the node for the next time it triggers
…

      // If there are still alarms pending, reset the timer
-     if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); }
+     if( alarms->head ) {
+         __cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
+         Duration delta = alarms->head->alarm - currtime;
+         Duration caped = max(delta, 50`us);
+         // itimerval tim = { caped };
+         // __cfaabi_dbg_print_buffer_local( " Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);
+
+         __kernel_set_timer( caped );
+     }
  }
…
  void disable_interrupts() {
      with( kernelTLS.preemption_state ) {
-         enabled = false;
+         static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
+
+         // Set enabled flag to false
+         // should be atomic to avoid preemption in the middle of the operation.
+         // use memory order RELAXED since there is no inter-thread on this variable requirements
+         __atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
+
+         // Signal the compiler that a fence is needed but only for signal handlers
+         __atomic_signal_fence(__ATOMIC_ACQUIRE);
+
          __attribute__((unused)) unsigned short new_val = disable_count + 1;
          disable_count = new_val;
…
  // If counter reaches 0, execute any pending CtxSwitch
  void enable_interrupts( __cfaabi_dbg_ctx_param ) {
-     processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
-     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic add
+     processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
+     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

      with( kernelTLS.preemption_state ){
…
          // Check if we need to prempt the thread because an interrupt was missed
          if( prev == 1 ) {
-             enabled = true;
+             static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
+
+             // Set enabled flag to true
+             // should be atomic to avoid preemption in the middle of the operation.
+             // use memory order RELAXED since there is no inter-thread on this variable requirements
+             __atomic_store_n(&enabled, true, __ATOMIC_RELAXED);
+
+             // Signal the compiler that a fence is needed but only for signal handlers
+             __atomic_signal_fence(__ATOMIC_RELEASE);
              if( proc->pending_preemption ) {
                  proc->pending_preemption = false;
…
      verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers someone is enabled already enabled interrupts
      if( prev == 1 ) {
-         kernelTLS.preemption_state.enabled = true;
+         static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
+         // Set enabled flag to true
+         // should be atomic to avoid preemption in the middle of the operation.
+         // use memory order RELAXED since there is no inter-thread on this variable requirements
+         __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
+
+         // Signal the compiler that a fence is needed but only for signal handlers
+         __atomic_signal_fence(__ATOMIC_RELEASE);
      }
  }
…
      if( !preemption_ready() ) { return; }

-     __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread );
+     __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );

      // Sync flag : prevent recursive calls to the signal handler
      kernelTLS.preemption_state.in_progress = true;

-     // We are about to CtxSwitch out of the signal handler, let other handlers in
-     signal_unblock( SIGUSR1 );
+     // Clear sighandler mask before context switching.
+     static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
+     if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
+         abort( "internal error, sigprocmask" );
+     }

      // TODO: this should go in finish action
…
          case EAGAIN :
          case EINTR :
+             {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
              continue;
          case EINVAL :
…
      sigset_t oldset;
      int ret;
-     ret = sigprocmask(0, NULL, &oldset);
+     ret = pthread_sigmask(0, NULL, &oldset);
      if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }

      ret = sigismember(&oldset, SIGUSR1);
      if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
-
      if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
+
+     ret = sigismember(&oldset, SIGALRM);
+     if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
+     if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
+
+     ret = sigismember(&oldset, SIGTERM);
+     if(ret < 0) { abort("ERROR sigismember returned %d", ret); }
+     if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
  }
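Three separate hardening steps land in this file: the alarm re-arm path clamps the next timeout to at least 50us (avoiding the zero-itimerval disarm pitfall checked in alarm.c), the signal mask is restored from the interrupted context via pthread_sigmask (sigprocmask has unspecified behaviour in multithreaded processes, while pthread_sigmask operates on the calling thread's mask), and the writes to the enabled flag become explicit atomics paired with signal fences. The last one is the subtle part: the flag is shared only between a kernel thread and a signal handler running on that same thread, so a RELAXED store is enough to rule out torn writes, and __atomic_signal_fence is a compiler-only barrier (it emits no hardware fence) that stops the compiler from reordering surrounding code across the store as observed by the handler. A stand-alone C sketch of the pattern (GCC/Clang builtins, hypothetical names):

#include <stdbool.h>

// Thread-local flag shared only between a kernel thread and a signal
// handler running on that same thread -- no inter-thread ordering needed.
static _Thread_local bool preemption_enabled = true;

// Checked at compile time, as in the CFA code: the store must never be
// implemented with a lock, or a signal arriving mid-store could deadlock.
_Static_assert(__atomic_always_lock_free(sizeof(bool), 0), "Must be lock-free");

void preempt_disable(void) {
    // RELAXED is enough: atomicity (no torn write) is all that is required.
    __atomic_store_n(&preemption_enabled, false, __ATOMIC_RELAXED);
    // Compiler-only barrier (no hardware fence emitted): code that follows
    // may not be reordered before the store as seen by a signal handler.
    __atomic_signal_fence(__ATOMIC_ACQUIRE);
}

void preempt_enable(void) {
    __atomic_store_n(&preemption_enabled, true, __ATOMIC_RELAXED);
    // Matching release-side barrier on the enable path.
    __atomic_signal_fence(__ATOMIC_RELEASE);
}

// The signal handler reads the flag the same way.
bool handler_may_preempt(void) {
    return __atomic_load_n(&preemption_enabled, __ATOMIC_RELAXED);
}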
src/libcfa/concurrency/thread.c
(r3ef35bd → reba74ba)

      node.next = NULL;
      node.prev = NULL;
-     doregister( this);
+     doregister(curr_cluster, this);

      monitors{ &self_mon_p, 1, (fptr_t)0 };
…

  void ^?{}(thread_desc& this) with( this ) {
-     unregister( this);
+     unregister(curr_cluster, this);
      ^self_cor{};
  }
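Thread construction and destruction now pass curr_cluster through to the registration helpers, so a cluster's list tracks exactly the threads whose lifetime is open: doregister runs during construction and unregister runs as the first step of destruction, before the coroutine state is torn down. A small C sketch of the pairing (hypothetical names; the helpers stand in for the doregister/unregister overloads declared in kernel_private.h):

struct cluster;       // owns the thread list and its lock (opaque here)
struct thread_desc;

// Hypothetical helpers standing in for the doregister/unregister overloads.
void cluster_register  (struct cluster * cltr, struct thread_desc * thrd);
void cluster_unregister(struct cluster * cltr, struct thread_desc * thrd);

struct thread_desc {
    struct cluster * curr_cluster;
    /* coroutine state, monitor, link fields ... */
};

// Construction registers the thread with the cluster it starts on.
void thread_init(struct thread_desc * this, struct cluster * cl) {
    this->curr_cluster = cl;
    cluster_register(this->curr_cluster, this);
    /* ... finish initializing monitors etc. ... */
}

// Destruction unregisters first, so no list traversal can observe a
// partially destroyed thread, then tears the rest down.
void thread_destroy(struct thread_desc * this) {
    cluster_unregister(this->curr_cluster, this);
    /* ... destroy coroutine state (^self_cor{} in the CFA code) ... */
}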