Changeset 13073be for src/libcfa


Timestamp: May 25, 2018, 1:37:34 PM (7 years ago)
Author:    Thierry Delisle <tdelisle@…>
Branches:  ADT, aaron-thesis, arm-eh, ast-experimental, cleanup-dtors, deferred_resn, demangler, enum, forall-pointer-decay, jacob/cs343-translation, jenkins-sandbox, master, new-ast, new-ast-unique-expr, new-env, no_list, persistent-indexer, pthread-emulation, qualifiedEnum, with_gc
Children:  8dbedfc
Parents:   a1a17a74
Message:   Fix atomic builtins in libcfa and prelude
Location:  src/libcfa
Files:     2 edited

  • src/libcfa/bits/locks.h

    ra1a17a74 → r13073be

      #endif

    - #if __SIZEOF_SIZE_T__ == 8
    -     #define __lock_test_and_test_and_set( lock ) (lock) == 0 && __sync_lock_test_and_set_8( &(lock), 1 ) == 0
    -     #define __lock_release( lock ) __sync_lock_release_8( &(lock) );
    - #elif __SIZEOF_SIZE_T__ == 4
    -     #define __lock_test_and_test_and_set( lock ) (lock) == 0 && __sync_lock_test_and_set_4( &(lock), 1 ) == 0
    -     #define __lock_release( lock ) __sync_lock_release_4( &(lock) );
    - #else
    -     #error unsupported architecture
    - #endif
    -
      struct __spinlock_t {
    -     __ALIGN__ volatile size_t lock;
    +     // Wrap in struct to prevent false sharing with debug info
    +     struct {
    +         // Align lock on 128-bit boundary
    +         __ALIGN__ volatile _Bool lock;
    +     };
          #ifdef __CFA_DEBUG__
    +         // previous function to acquire the lock
              const char * prev_name;
    +         // previous thread to acquire the lock
              void* prev_thrd;
          #endif

    ...

          // Lock the spinlock, return false if already acquired
          static inline _Bool try_lock  ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
    -         _Bool result = __lock_test_and_test_and_set( this.lock );
    +         _Bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0);
              if( result ) {
                  disable_interrupts();

    ...

              for ( unsigned int i = 1;; i += 1 ) {
    -             if ( __lock_test_and_test_and_set( this.lock ) ) break;
    +             if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break;
                  #ifndef NOEXPBACK
                      // exponential spin

    ...

          }

    -     // // Lock the spinlock, yield if already acquired
    -     // static inline void lock_yield( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) {
    -     //     for ( unsigned int i = 1;; i += 1 ) {
    -     //         if ( __lock_test_and_test_and_set( this.lock ) ) break;
    -     //         yield( i );
    -     //     }
    -     //     disable_interrupts();
    -     //     __cfaabi_dbg_debug_do(
    -     //         this.prev_name = caller;
    -     //         this.prev_thrd = this_thread;
    -     //     )
    -     // }
    -
          static inline void unlock( __spinlock_t & this ) {
              enable_interrupts_noPoll();
    -         __lock_release( this.lock );
    +         __atomic_clear( &this.lock, __ATOMIC_RELEASE );
          }
      #endif
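
    For reference, the acquire/release pattern that the new try_lock and unlock rely on can be reproduced with the same GCC __atomic builtins outside of libcfa. The following is a minimal, standalone sketch of a test-and-test-and-set spinlock; the names spin_t, spin_trylock, spin_lock, and spin_unlock are illustrative only and are not part of libcfa:

        #include <stdbool.h>

        typedef struct {
            volatile bool lock;                    // 0 = free, 1 = held
        } spin_t;

        // Try to acquire: plain read first (the cheap "test"), then the atomic test-and-set.
        static inline bool spin_trylock( spin_t * this ) {
            return (this->lock == 0) && (__atomic_test_and_set( &this->lock, __ATOMIC_ACQUIRE ) == 0);
        }

        // Spin until acquired, re-reading the flag between attempts.
        static inline void spin_lock( spin_t * this ) {
            while ( ! spin_trylock( this ) ) {
                while ( this->lock != 0 ) {}       // read-only spin while the lock looks held
            }
        }

        // Release with RELEASE ordering so prior writes are visible to the next holder.
        static inline void spin_unlock( spin_t * this ) {
            __atomic_clear( &this->lock, __ATOMIC_RELEASE );
        }

    The plain read before __atomic_test_and_set is the extra "test" in test-and-test-and-set: while the lock is held, waiters spin on a cached read instead of repeatedly issuing atomic read-modify-write operations.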
  • src/libcfa/concurrency/preemption.c

    ra1a17a74 → r13073be

          void disable_interrupts() {
              with( kernelTLS.preemption_state ) {
    -             enabled = false;
    +             static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
    +
    +             // Set enabled flag to false
    +             // should be atomic to avoid preemption in the middle of the operation
    +             // use memory order RELAXED since there are no inter-thread requirements on this variable
    +             __atomic_store_n(&enabled, false, __ATOMIC_RELAXED);
    +
    +             // Signal the compiler that a fence is needed, but only for signal handlers
    +             __atomic_signal_fence(__ATOMIC_ACQUIRE);
    +
                  __attribute__((unused)) unsigned short new_val = disable_count + 1;
                  disable_count = new_val;

    ...

          // If counter reaches 0, execute any pending CtxSwitch
          void enable_interrupts( __cfaabi_dbg_ctx_param ) {
    -         processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic add
    -         thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic add
    +         processor   * proc = kernelTLS.this_processor; // Cache the processor now since interrupts can start happening after the atomic store
    +         thread_desc * thrd = kernelTLS.this_thread;    // Cache the thread now since interrupts can start happening after the atomic store

              with( kernelTLS.preemption_state ){

    ...

              // Check if we need to preempt the thread because an interrupt was missed
              if( prev == 1 ) {
    -             enabled = true;
    +             static_assert(__atomic_always_lock_free(sizeof(enabled), &enabled), "Must be lock-free");
    +
    +             // Set enabled flag to true
    +             // should be atomic to avoid preemption in the middle of the operation
    +             // use memory order RELAXED since there are no inter-thread requirements on this variable
    +             __atomic_store_n(&enabled, true, __ATOMIC_RELAXED);
    +
    +             // Signal the compiler that a fence is needed, but only for signal handlers
    +             __atomic_signal_fence(__ATOMIC_RELEASE);
                  if( proc->pending_preemption ) {
                      proc->pending_preemption = false;

    ...

          verifyf( prev != 0u, "Incremented from %u\n", prev ); // If this triggers, someone enabled interrupts that were already enabled
          if( prev == 1 ) {
    -         kernelTLS.preemption_state.enabled = true;
    +         static_assert(__atomic_always_lock_free(sizeof(kernelTLS.preemption_state.enabled), &kernelTLS.preemption_state.enabled), "Must be lock-free");
    +         // Set enabled flag to true
    +         // should be atomic to avoid preemption in the middle of the operation
    +         // use memory order RELAXED since there are no inter-thread requirements on this variable
    +         __atomic_store_n(&kernelTLS.preemption_state.enabled, true, __ATOMIC_RELAXED);
    +
    +         // Signal the compiler that a fence is needed, but only for signal handlers
    +         __atomic_signal_fence(__ATOMIC_RELEASE);
          }
      }
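
    The flag updates above follow a pattern that can be exercised in isolation: the enabled flag is only shared between a thread and a signal handler running on that same thread, so the store must be untearable (hence atomic and lock-free) but needs no hardware fence; a RELAXED store plus a compiler-level signal fence is enough. A minimal sketch, assuming illustrative names (preempt_enabled, disable_preemption) that are not part of libcfa:

        #include <stdbool.h>

        static volatile bool preempt_enabled = true;      // read by a signal handler on the same thread

        static void disable_preemption( void ) {
            // The store must compile to a single instruction so a signal cannot observe a torn value.
            _Static_assert( __atomic_always_lock_free( sizeof(preempt_enabled), 0 ), "Must be lock-free" );

            // RELAXED is enough: no other thread reads this flag, only a signal handler on this thread.
            __atomic_store_n( &preempt_enabled, false, __ATOMIC_RELAXED );

            // Compiler-only barrier: keep the store ordered with respect to the signal handler
            // without emitting a hardware memory fence.
            __atomic_signal_fence( __ATOMIC_ACQUIRE );
        }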