Changeset 054514d for src/libcfa


Timestamp:
May 29, 2018, 3:26:31 PM
Author:
Rob Schluntz <rschlunt@…>
Branches:
ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
Children:
3530f39a
Parents:
96812c0 (diff), da60c631 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
Message:

Merge branch 'master' of plg.uwaterloo.ca:/u/cforall/software/cfa/cfa-cc

Location:
src/libcfa
Files:
3 edited

  • src/libcfa/bits/locks.h

    r96812c0 → r054514d

      #endif

    - #if __SIZEOF_SIZE_T__ == 8
    -     #define __lock_test_and_test_and_set( lock ) (lock) == 0 && __sync_lock_test_and_set_8( &(lock), 1 ) == 0
    -     #define __lock_release( lock ) __sync_lock_release_8( &(lock) );
    - #elif __SIZEOF_SIZE_T__ == 4
    -     #define __lock_test_and_test_and_set( lock ) (lock) == 0 && __sync_lock_test_and_set_4( &(lock), 1 ) == 0
    -     #define __lock_release( lock ) __sync_lock_release_4( &(lock) );
    - #else
    -     #error unsupported architecture
    - #endif
    -
      struct __spinlock_t {
    -     __ALIGN__ volatile size_t lock;
    +     // Wrap in struct to prevent false sharing with debug info
    +     struct {
    +         // Align lock on 128-bit boundary
    +         __ALIGN__ volatile _Bool lock;
    +     };
      #ifdef __CFA_DEBUG__
    +     // previous function to acquire the lock
          const char * prev_name;
    +     // previous thread to acquire the lock
          void* prev_thrd;
      #endif

    …

          // Lock the spinlock, return false if already acquired
          static inline _Bool try_lock  ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
    -         _Bool result = __lock_test_and_test_and_set( this.lock );
    +         _Bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
              if( result ) {
                  disable_interrupts();

    …

              for ( unsigned int i = 1;; i += 1 ) {
    -             if ( __lock_test_and_test_and_set( this.lock ) ) break;
    +             if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
                  #ifndef NOEXPBACK
                      // exponential spin

    …

          }

    -     // // Lock the spinlock, yield if already acquired
    -     // static inline void lock_yield( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
    -     //     for ( unsigned int i = 1;; i += 1 ) {
    -     //         if ( __lock_test_and_test_and_set( this.lock ) ) break;
    -     //         yield( i );
    -     //     }
    -     //     disable_interrupts();
    -     //     __cfaabi_dbg_debug_do(
    -     //         this.prev_name = caller;
    -     //         this.prev_thrd = this_thread;
    -     //     )
    -     // }
    -
          static inline void unlock( __spinlock_t & this ) {
              enable_interrupts_noPoll();
    -         __lock_release( this.lock );
    +         __atomic_clear( &this.lock, __ATOMIC_RELEASE );
          }
      #endif
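In both try_lock and lock, this changeset drops the word-sized __sync_lock_test_and_set_N / __lock_release macros and drives a _Bool flag with the GCC __atomic_test_and_set / __atomic_clear builtins, keeping the test-and-test-and-set fast path (plain read first, atomic exchange only if the lock looks free). The following is a minimal plain-C sketch of that pattern, not the libcfa code itself; the names spin_lock_t, spin_trylock, spin_lock and spin_unlock are illustrative.

    #include <stdbool.h>

    typedef struct {
        // a one-byte flag is all __atomic_test_and_set needs; libcfa additionally
        // wraps it in an aligned struct to avoid false sharing with the debug fields
        volatile bool lock;
    } spin_lock_t;

    // Try once: cheap plain read first, atomic exchange only if the lock looks free.
    static inline bool spin_trylock( spin_lock_t * lk ) {
        return lk->lock == 0
            && __atomic_test_and_set( &lk->lock, __ATOMIC_ACQUIRE ) == 0;
    }

    // Spin until acquired; bits/locks.h adds exponential backoff and Pause() here.
    static inline void spin_lock( spin_lock_t * lk ) {
        for ( unsigned int i = 1;; i += 1 ) {
            if ( spin_trylock( lk ) ) break;
        }
    }

    static inline void spin_unlock( spin_lock_t * lk ) {
        __atomic_clear( &lk->lock, __ATOMIC_RELEASE );
    }

Checking the flag before the exchange keeps the cache line in a shared state while the lock is held, so waiters spin locally instead of bouncing the line on every attempt; the ACQUIRE/RELEASE pair supplies the ordering the old __sync builtins provided implicitly.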
  • src/libcfa/concurrency/alarm.c

    r96812c0 → r054514d

      // Created On       : Fri Jun 2 11:31:25 2017
      // Last Modified By : Peter A. Buhr
    - // Last Modified On : Mon Apr  9 13:36:18 2018
    - // Update Count     : 61
    + // Last Modified On : Fri May 25 06:25:47 2018
    + // Update Count     : 67
      //
      //

    …

      void __kernel_set_timer( Duration alarm ) {
    -     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%luns)", alarm.tv);
    +     verifyf(alarm >= 1`us || alarm == 0, "Setting timer to < 1us (%jins)", alarm.tv);
          setitimer( ITIMER_REAL, &(itimerval){ alarm }, NULL );
      }
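The only functional change here is the conversion specifier in the verifyf message: %lu mismatches the signed 64-bit tick count carried by alarm.tv, while %ji prints an intmax_t. A small plain-C illustration of the same fix, assuming the tick count is an int64_t nanosecond value (the variable name tv is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main( void ) {
        int64_t tv = 500;     // 500 ns, below the 1 us minimum the assertion guards
        // "%lu" expects an unsigned long (32 bits on some targets) and has the wrong
        // signedness; "%ji" consumes an intmax_t, so casting the 64-bit count keeps
        // the call portable.
        printf( "Setting timer to < 1us (%jins)\n", (intmax_t)tv );
        return 0;
    }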
  • src/libcfa/concurrency/preemption.c

    r96812c0 → r054514d

      void disable_interrupts() {
          with( kernelTLS.preemption_state ) {
    -         enabled = false;
    +         #if GCC_VERSION > 50000
    +         static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
    +         #endif
    +
    +         // Set enabled flag to false
    +         // should be atomic to avoid preemption in the middle of the operation.
    +         // use memory order RELAXED since there is no inter-thread on this variable requirements
    +         __atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
    +
    +         // Signal the compiler that a fence is needed but only for signal handlers
    +         __atomic_signal_fence(__ATOMIC_ACQUIRE);
    +
              __attribute__((unused)) unsigned short new_val = disable_count + 1;
              disable_count = new_val;

    …

      // If counter reaches 0, execute any pending CtxSwitch
      void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -     processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
    -     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic add
    +     processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    +     thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

          with( kernelTLS.preemption_state ){

    …

              // Check if we need to prempt the thread because an interrupt was missed
              if( prev == 1 ) {
    -             enabled = true;
    +             #if GCC_VERSION > 50000
    +             static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
    +             #endif
    +
    +             // Set enabled flag to true
    +             // should be atomic to avoid preemption in the middle of the operation.
    +             // use memory order RELAXED since there is no inter-thread on this variable requirements
    +             __atomic_store_n(&enabled, true, __ATOMIC_RELAXED);
    +
    +             // Signal the compiler that a fence is needed but only for signal handlers
    +             __atomic_signal_fence(__ATOMIC_RELEASE);
                  if( proc->pending_preemption ) {
                      proc->pending_preemption = false;

    …

          verifyf( prev != 0u, "Incremented from %u\n", prev );  // If this triggers someone is enabled already enabled interrupts
          if( prev == 1 ) {
    -         kernelTLS.preemption_state.enabled = true;
    +         #if GCC_VERSION > 50000
    +         static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
    +         #endif
    +         // Set enabled flag to true
    +         // should be atomic to avoid preemption in the middle of the operation.
    +         // use memory order RELAXED since there is no inter-thread on this variable requirements
    +         __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
    +
    +         // Signal the compiler that a fence is needed but only for signal handlers
    +         __atomic_signal_fence(__ATOMIC_RELEASE);
          }
      }

    …

      // Clear sighandler mask before context switching.
    + #if GCC_VERSION > 50000
      static_assert( sizeof( sigset_t ) == sizeof( cxt->uc_sigmask ), "Expected cxt->uc_sigmask to be of sigset_t" );
    + #endif
      if ( pthread_sigmask( SIG_SETMASK, (sigset_t *)&(cxt->uc_sigmask), NULL ) == -1 ) {
          abort( "internal error, sigprocmask" );
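Each of these hunks replaces a plain assignment to the thread-local enabled flag with the same pattern: assert (on newer GCC) that the flag is always lock-free, store it with __atomic_store_n(..., __ATOMIC_RELAXED), and follow the store with __atomic_signal_fence, a compiler-only barrier that orders the store with respect to a signal handler running in the same thread without emitting a hardware fence. Below is a minimal compilable sketch of that pattern outside libcfa, with hypothetical names (preemption_enabled, alarm_handler, disable_preemption); it is an illustration under those assumptions, not the libcfa code.

    #include <signal.h>
    #include <stdbool.h>

    static __thread volatile bool preemption_enabled = true;   // stand-in for kernelTLS.preemption_state.enabled

    static void alarm_handler( int sig ) {
        (void)sig;
        // The handler runs in the same thread that wrote the flag, so no inter-thread
        // synchronization is required; the compiler fence below keeps the store from
        // being reordered past the point where a signal could observe it.
        if ( ! preemption_enabled ) {
            return;                           // preemption disabled: defer instead of switching
        }
        /* ... perform the preemption / context switch ... */
    }

    static void disable_preemption( void ) {
        // Compile-time check mirroring the GCC_VERSION-guarded static_assert above.
        _Static_assert( __atomic_always_lock_free( sizeof(preemption_enabled), 0 ),
                        "flag must be lock-free" );
        // RELAXED suffices: only this thread and its own signal handlers read the flag.
        __atomic_store_n( &preemption_enabled, false, __ATOMIC_RELAXED );
        // Compiler barrier only (no hardware fence): orders the store against later
        // code as seen by an asynchronous signal delivered to this thread.
        __atomic_signal_fence( __ATOMIC_ACQUIRE );
    }

    int main( void ) {
        signal( SIGALRM, alarm_handler );
        disable_preemption();
        raise( SIGALRM );                     // handler observes the flag already cleared
        return 0;
    }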