Timestamp: May 25, 2018, 2:51:06 PM (7 years ago)
Author: Aaron Moss <a3moss@…>
Branches: new-env, with_gc
Children: cdc4d43
Parents: 3ef35bd (diff), 58e822a (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
Message: Merge remote-tracking branch 'origin/master' into with_gc
Location: src/libcfa/concurrency
Files: 6 edited

  • src/libcfa/concurrency/alarm.c (r3ef35bd → reba74ba)

        // Created On       : Fri Jun 2 11:31:25 2017
        // Last Modified By : Peter A. Buhr
      - // Last Modified On : Mon Apr  9 13:36:18 2018
      - // Update Count     : 61
      + // Last Modified On : Fri May 25 06:25:47 2018
      + // Update Count     : 67
        //
      …
        void __kernel_set_timer( Duration alarm ) {
      +         verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
                setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
        }
      …
        }

      - __cfaabi_dbg_debug_do( bool validate( alarm_list_t * this ) {
      + #if !defined(NDEBUG) && (defined(__CFA_DEBUG__) || defined(__CFA_VERIFY__))
      + bool validate( alarm_list_t * this ) {
                alarm_node_t ** it = &this->head;
                while( (*it) ) {
      …
                return it == this->tail;
      - })
      + }
      + #endif

        static inline void insert_at( alarm_list_t * this, alarm_node_t * n, __alarm_it_t p ) {
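    The verifyf added to __kernel_set_timer guards against arming the timer with a non-zero value below the 1us granularity of setitimer; such a value would truncate to zero and silently disarm the timer instead. A minimal plain-C sketch of the same guard-then-arm pattern (kernel_set_timer and its raw nanosecond parameter are illustrative stand-ins for CFA's Duration-based API):

        #include <assert.h>
        #include <sys/time.h>

        // Hypothetical stand-in for CFA's __kernel_set_timer( Duration ).
        static void kernel_set_timer( long long alarm_ns ) {
                // A non-zero request below 1us would truncate to {0,0},
                // which disarms the timer instead of arming it.
                assert( alarm_ns >= 1000 || alarm_ns == 0 );

                struct itimerval val = {
                        .it_value    = { .tv_sec  = alarm_ns / 1000000000,
                                         .tv_usec = (alarm_ns % 1000000000) / 1000 },
                        .it_interval = { 0, 0 },   // one-shot: no automatic reload
                };
                setitimer( ITIMER_REAL, &val, NULL );
        }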
  • src/libcfa/concurrency/kernel (r3ef35bd → reba74ba)

                __dllist_t(struct processor) idles;

      +         // List of processors
      +         __spinlock_t thread_list_lock;
      +         __dllist_t(struct thread_desc) threads;
      +
                // Link lists fields
                struct {
  • src/libcfa/concurrency/kernel.c (r3ef35bd → reba74ba)

        thread_desc * mainThread;

      - struct { __dllist_t(thread_desc) list; __spinlock_t lock; } global_threads ;
        struct { __dllist_t(cluster    ) list; __spinlock_t lock; } global_clusters;

        //-----------------------------------------------------------------------------
        // Global state
      -
      - // volatile thread_local bool preemption_in_progress = 0;
      - // volatile thread_local bool preemption_enabled = false;
      - // volatile thread_local unsigned short disable_preempt_count = 1;
      -
        thread_local struct KernelThreadData kernelTLS = {
                NULL,
      …
                node.next = NULL;
                node.prev = NULL;
      -         doregister(this);
      +         doregister(curr_cluster, this);

                monitors{ &self_mon_p, 1, (fptr_t)0 };
      …
                procs{ __get };
                idles{ __get };
      +         threads{ __get };

                doregister(this);
      …
                __cfaabi_dbg_print_safe("Kernel : Starting\n");

      -         global_threads. list{ __get };
      -         global_threads. lock{};
                global_clusters.list{ __get };
                global_clusters.lock{};
      …
                ^(mainThread){};

      +         ^(global_clusters.list){};
      +         ^(global_clusters.lock){};
      +
                __cfaabi_dbg_print_safe("Kernel : Shutdown complete\n");
        }
      …
                else {
                        int len = snprintf( abort_text, abort_text_size, "Error occurred outside of any thread.\n" );
      +                 __cfaabi_dbg_bits_write( abort_text, len );
                }
        }
      …
        //-----------------------------------------------------------------------------
        // Global Queues
      - void doregister( thread_desc & thrd ) {
      -         // lock      ( global_thread.lock );
      -         // push_front( global_thread.list, thrd );
      -         // unlock    ( global_thread.lock );
      - }
      -
      - void unregister( thread_desc & thrd ) {
      -         // lock  ( global_thread.lock );
      -         // remove( global_thread.list, thrd );
      -         // unlock( global_thread.lock );
      - }
      -
        void doregister( cluster     & cltr ) {
      -         // lock      ( global_cluster.lock );
      -         // push_front( global_cluster.list, cltr );
      -         // unlock    ( global_cluster.lock );
      +         lock      ( global_clusters.lock __cfaabi_dbg_ctx2);
      +         push_front( global_clusters.list, cltr );
      +         unlock    ( global_clusters.lock );
        }

        void unregister( cluster     & cltr ) {
      -         // lock  ( global_cluster.lock );
      -         // remove( global_cluster.list, cltr );
      -         // unlock( global_cluster.lock );
      - }
      -
      +         lock  ( global_clusters.lock __cfaabi_dbg_ctx2);
      +         remove( global_clusters.list, cltr );
      +         unlock( global_clusters.lock );
      + }
      +
      + void doregister( cluster * cltr, thread_desc & thrd ) {
      +         lock      (cltr->thread_list_lock __cfaabi_dbg_ctx2);
      +         push_front(cltr->threads, thrd);
      +         unlock    (cltr->thread_list_lock);
      + }
      +
      + void unregister( cluster * cltr, thread_desc & thrd ) {
      +         lock  (cltr->thread_list_lock __cfaabi_dbg_ctx2);
      +         remove(cltr->threads, thrd );
      +         unlock(cltr->thread_list_lock);
      + }

        void doregister( cluster * cltr, processor * proc ) {
      -         // lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
      -         // push_front(cltr->procs, *proc);
      -         // unlock    (cltr->proc_list_lock);
      +         lock      (cltr->proc_list_lock __cfaabi_dbg_ctx2);
      +         push_front(cltr->procs, *proc);
      +         unlock    (cltr->proc_list_lock);
        }

        void unregister( cluster * cltr, processor * proc ) {
      -         // lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
      -         // remove(cltr->procs, *proc );
      -         // unlock(cltr->proc_list_lock);
      +         lock  (cltr->proc_list_lock __cfaabi_dbg_ctx2);
      +         remove(cltr->procs, *proc );
      +         unlock(cltr->proc_list_lock);
        }
  • src/libcfa/concurrency/kernel_private.h (r3ef35bd → reba74ba)

      …
      - void doregister( struct thread_desc & thrd );
      - void unregister( struct thread_desc & thrd );
      + void doregister( struct cluster & cltr );
      + void unregister( struct cluster & cltr );

      - void doregister( struct cluster     & cltr );
      - void unregister( struct cluster     & cltr );
      + void doregister( struct cluster * cltr, struct thread_desc & thrd );
      + void unregister( struct cluster * cltr, struct thread_desc & thrd );

        void doregister( struct cluster * cltr, struct processor * proc );
  • src/libcfa/concurrency/preemption.c (r3ef35bd → reba74ba)

        #include "preemption.h"
      + #include <assert.h>

        extern "C" {
      …
                //Loop throught every thing expired
                while( node = get_expired( alarms, currtime ) ) {
      +                 // __cfaabi_dbg_print_buffer_decl( " KERNEL: preemption tick.\n" );

                        // Check if this is a kernel
      …
                        Duration period = node->period;
                        if( period > 0 ) {
      +                         // __cfaabi_dbg_print_buffer_local( " KERNEL: alarm period is %lu.\n", period.tv );
                                node->alarm = currtime + period;    // Alarm is periodic, add currtime to it (used cached current time)
                                insert( alarms, node );             // Reinsert the node for the next time it triggers
      …

                // If there are still alarms pending, reset the timer
      -         if( alarms->head ) { __kernel_set_timer( alarms->head->alarm - currtime ); }
      +         if( alarms->head ) {
      +                 __cfaabi_dbg_print_buffer_decl( " KERNEL: @%lu(%lu) resetting alarm to %lu.\n", currtime.tv, __kernel_get_time().tv, (alarms->head->alarm - currtime).tv);
      +                 Duration delta = alarms->head->alarm - currtime;
      +                 Duration caped = max(delta, 50`us);
      +                 // itimerval tim  = { caped };
      +                 // __cfaabi_dbg_print_buffer_local( "    Values are %lu, %lu, %lu %lu.\n", delta.tv, caped.tv, tim.it_value.tv_sec, tim.it_value.tv_usec);
      +
      +                 __kernel_set_timer( caped );
      +         }
        }
      …
        void disable_interrupts() {
                with( kernelTLS.preemption_state ) {
      -                 enabled = false;
      +                 static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
      +
      +                 // Set enabled flag to false
      +                 // should be atomic to avoid preemption in the middle of the operation.
      +                 // use memory order RELAXED since there is no inter-thread on this variable requirements
      +                 __atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
      +
      +                 // Signal the compiler that a fence is needed but only for signal handlers
      +                 __atomic_signal_fence(__ATOMIC_ACQUIRE);
      +
                        __attribute__((unused)) unsigned short new_val = disable_count + 1;
                        disable_count = new_val;
      …
        // If counter reaches 0, execute any pending CtxSwitch
        void enable_interrupts( __cfaabi_dbg_ctx_param ) {
      -         processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
      -         thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic add
      +         processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
      +         thread_desc * thrd = kernelTLS.this_thread;       // Cache the thread now since interrupts can start happening after the atomic store

                with( kernelTLS.preemption_state ){
      …
                        // Check if we need to prempt the thread because an interrupt was missed
                        if( prev == 1 ) {
      -                         enabled = true;
      +                         static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
      +
      +                         // Set enabled flag to true
      +                         // should be atomic to avoid preemption in the middle of the operation.
      +                         // use memory order RELAXED since there is no inter-thread on this variable requirements
      +                         __atomic_store_n(&enabled, true, __ATOMIC_RELAXED);
      +
      +                         // Signal the compiler that a fence is needed but only for signal handlers
      +                         __atomic_signal_fence(__ATOMIC_RELEASE);
                                if( proc->pending_preemption ) {
                                        proc->pending_preemption = false;
      …
                verifyf( prev != 0u, "Incremented from %u\n", prev );                     // If this triggers someone is enabled already enabled interrupts
                if( prev == 1 ) {
      -                 kernelTLS.preemption_state.enabled = true;
      +                 static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
      +                 // Set enabled flag to true
      +                 // should be atomic to avoid preemption in the middle of the operation.
      +                 // use memory order RELAXED since there is no inter-thread on this variable requirements
      +                 __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
      +
      +                 // Signal the compiler that a fence is needed but only for signal handlers
      +                 __atomic_signal_fence(__ATOMIC_RELEASE);
                }
        }
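    The plain writes to the per-thread enabled flag are replaced by relaxed atomic stores paired with __atomic_signal_fence, which is a compiler-only barrier: it emits no hardware fence, but stops the compiler from reordering the store relative to a signal handler running on the same thread. A compilable C sketch of the pattern, mirroring the ordering used in the changeset (names are illustrative):

        #include <stdbool.h>

        // Per-kernel-thread flag read by this thread's own signal handler.
        static __thread volatile bool preemption_enabled;

        _Static_assert( __atomic_always_lock_free( sizeof(bool), 0 ),
                        "flag must be lock-free to be async-signal-safe" );

        static void preemption_disable( void ) {
                // RELAXED suffices: only this thread and its own signal
                // handlers touch the flag, so no inter-thread ordering is needed.
                __atomic_store_n( &preemption_enabled, false, __ATOMIC_RELAXED );
                // Compiler-only fence for same-thread signal handlers.
                __atomic_signal_fence( __ATOMIC_ACQUIRE );
        }

        static void preemption_enable( void ) {
                __atomic_store_n( &preemption_enabled, true, __ATOMIC_RELAXED );
                __atomic_signal_fence( __ATOMIC_RELEASE );
        }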
     
      …
                if( !preemption_ready() ) { return; }

      -         __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p).\n", kernelTLS.this_processor, kernelTLS.this_thread );
      +         __cfaabi_dbg_print_buffer_decl( " KERNEL: preempting core %p (%p @ %p).\n", kernelTLS.this_processor, kernelTLS.this_thread, (void *)(cxt->uc_mcontext.CFA_REG_IP) );

                // Sync flag : prevent recursive calls to the signal handler
                kernelTLS.preemption_state.in_progress = true;

      -         // We are about to CtxSwitch out of the signal handler, let other handlers in
      -         signal_unblock( SIGUSR1 );
      +         // Clear sighandler mask before context switching.
      +         static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
      +         if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
      +                 abort( "internal error, sigprocmask" );
      +         }

                // TODO: this should go in finish action
      …
                                case EAGAIN :
                                case EINTR :
      +                                 {__cfaabi_dbg_print_buffer_decl( " KERNEL: Spurious wakeup %d.\n", err );}
                                        continue;
                        case EINVAL :
      …
                sigset_t oldset;
                int ret;
      -         ret = sigprocmask(0, NULL, &oldset);
      +         ret = pthread_sigmask(0, NULL, &oldset);
                if(ret != 0) { abort("ERROR sigprocmask returned %d", ret); }

                ret = sigismember(&oldset, SIGUSR1);
                if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
      -
                if(ret == 1) { abort("ERROR SIGUSR1 is disabled"); }
      +
      +         ret = sigismember(&oldset, SIGALRM);
      +         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
      +         if(ret == 0) { abort("ERROR SIGALRM is enabled"); }
      +
      +         ret = sigismember(&oldset, SIGTERM);
      +         if(ret <  0) { abort("ERROR sigismember returned %d", ret); }
      +         if(ret == 1) { abort("ERROR SIGTERM is disabled"); }
        }
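    Two related fixes land here: the preemption handler now restores the interrupted context's own signal mask from cxt->uc_sigmask before context switching (instead of just unblocking SIGUSR1), and the debug mask check switches from sigprocmask, whose behaviour is unspecified in multithreaded programs, to pthread_sigmask, additionally verifying SIGALRM and SIGTERM. A stand-alone C sketch of querying and checking a thread's mask (check_sigmask is hypothetical; the expected states follow the changeset's assertions):

        #include <pthread.h>
        #include <signal.h>
        #include <stdlib.h>

        // Hypothetical debug check: SIGUSR1 and SIGTERM must be deliverable,
        // SIGALRM must be blocked on the calling thread.
        static void check_sigmask( void ) {
                sigset_t oldset;
                // With a NULL new set, 'how' is ignored and the current
                // mask is written to oldset.
                if ( pthread_sigmask( 0, NULL, &oldset ) != 0 ) abort();

                if ( sigismember( &oldset, SIGUSR1 ) == 1 ) abort();   // must not be blocked
                if ( sigismember( &oldset, SIGALRM ) == 0 ) abort();   // must be blocked
                if ( sigismember( &oldset, SIGTERM ) == 1 ) abort();   // must not be blocked
        }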
  • src/libcfa/concurrency/thread.c (r3ef35bd → reba74ba)

                node.next = NULL;
                node.prev = NULL;
      -         doregister(this);
      +         doregister(curr_cluster, this);

                monitors{ &self_mon_p, 1, (fptr_t)0 };
      …

        void ^?{}(thread_desc& this) with( this ) {
      -         unregister(this);
      +         unregister(curr_cluster, this);
                ^self_cor{};
        }