Timestamp:
Nov 26, 2019, 3:19:20 PM (5 years ago)
Author:
Thierry Delisle <tdelisle@…>
Branches:
ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast, new-ast-unique-expr, pthread-emulation, qualifiedEnum
Children:
30763fd
Parents:
21184e3
Message:

First step at adding the new ready queue to Cforall

File:
1 edited

Legend:

  (unmarked)  Unmodified
  +           Added
  -           Removed
  • libcfa/src/concurrency/kernel_private.hfa

r21184e3 → r7768b8d

  //-----------------------------------------------------------------------------
  // Utils
- #define KERNEL_STORAGE(T,X) static char storage_##X[sizeof(T)]
+ #define KERNEL_STORAGE(T,X) __attribute__((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]

  static inline uint32_t tls_rand() {
     
  void unregister( struct cluster * cltr, struct thread_desc & thrd );

- void doregister( struct cluster * cltr, struct processor * proc );
- void unregister( struct cluster * cltr, struct processor * proc );
+ //=======================================================================
+ // Cluster lock API
+ //=======================================================================
+ struct __attribute__((aligned(64))) __processor_id {
+ 	processor * volatile handle;
+ 	volatile bool lock;
+ };
+
+ // Lock-free registering/unregistering of processors
+ // Register a processor to a given cluster and get its unique id in return
+ unsigned doregister( struct cluster * cltr, struct processor * proc );
+
+ // Unregister a processor from a given cluster
+ void     unregister( struct cluster * cltr, struct processor * proc );
+
+ //=======================================================================
+ // Reader-writer lock implementation
+ // Concurrent with doregister/unregister,
+ //    i.e., processors can be registered or unregistered at any point,
+ //    during or between reader critical sections
+ static inline void __atomic_acquire(volatile bool * ll) {
+ 	while( __builtin_expect(__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST), false) ) {
+ 		while(__atomic_load_n(ll, (int)__ATOMIC_RELAXED))
+ 			asm volatile("pause");
+ 	}
+ 	/* paranoid */ verify(*ll);
+ }
+
+ static inline bool __atomic_try_acquire(volatile bool * ll) {
+ 	// true means the lock was acquired; the exchange returns the previous value
+ 	return !__atomic_exchange_n(ll, (bool)true, __ATOMIC_SEQ_CST);
+ }
+
+ static inline void __atomic_unlock(volatile bool * ll) {
+ 	/* paranoid */ verify(*ll);
+ 	__atomic_store_n(ll, (bool)false, __ATOMIC_RELEASE);
+ }
+
+ //-----------------------------------------------------------------------
+ // Reader side : acquire when using the ready queue to schedule but not
+ //  creating/destroying queues
+ static inline void ready_schedule_lock( struct cluster & cltr, struct processor * proc) with(cltr.ready_lock) {
+ 	unsigned iproc = proc->id;
+ 	/*paranoid*/ verify(data[iproc].handle == proc);
+ 	/*paranoid*/ verify(iproc < ready);
+
+ 	// Step 1 : make sure no writer is in the middle of the critical section
+ 	while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
+ 		asm volatile("pause");
+
+ 	// A fence would be needed here so that acquiring the local lock cannot
+ 	// begin before a false value of the writer lock has been read.
+ 	// It is not needed on x86, so it stays commented out.
+ 	// std::atomic_thread_fence(std::memory_order_seq_cst);
+
+ 	// Step 2 : acquire our local lock
+ 	__atomic_acquire( &data[iproc].lock );
+ 	/*paranoid*/ verify(data[iproc].lock);
+ }
+
+ static inline void ready_schedule_unlock( struct cluster & cltr, struct processor * proc) with(cltr.ready_lock) {
+ 	unsigned iproc = proc->id;
+ 	/*paranoid*/ verify(data[iproc].handle == proc);
+ 	/*paranoid*/ verify(iproc < ready);
+ 	/*paranoid*/ verify(data[iproc].lock);
+ 	__atomic_store_n(&data[iproc].lock, false, __ATOMIC_RELEASE);
+ }
+
+ //-----------------------------------------------------------------------
+ // Writer side : acquire when changing the ready queue, e.g. adding more
+ //  queues or removing them.
+ uint_fast32_t ready_mutate_lock( struct cluster & cltr );
+
+ void ready_mutate_unlock( struct cluster & cltr, uint_fast32_t );
+
+ bool push(__intrusive_ready_queue_t & this, thread_desc * node);
+ [thread_desc *, bool] pop(__intrusive_ready_queue_t & this);

  // Local Variables: //
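
A note on the KERNEL_STORAGE change above: a bare static char array is only guaranteed byte alignment, so constructing a T into it is undefined behaviour on alignment-sensitive targets; the added __alignof__(T) attribute fixes that. The following minimal sketch in plain C shows the usage pattern such a macro supports; the cluster_stub type, the mainCluster name, and init_mainCluster are illustrative only and not part of the changeset.

    #include <string.h>

    #define KERNEL_STORAGE(T, X) __attribute__((aligned(__alignof__(T)))) static char storage_##X[sizeof(T)]

    struct cluster_stub { long data[8]; };              // stand-in type, for illustration only
    KERNEL_STORAGE(struct cluster_stub, mainCluster);   // correctly aligned backing store

    static struct cluster_stub * init_mainCluster(void) {
            // Safe only because the storage carries the alignment of cluster_stub;
            // a plain char array could start at any byte address.
            struct cluster_stub * c = (struct cluster_stub *)storage_mainCluster;
            memset( c, 0, sizeof(*c) );
            return c;
    }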
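
The header above implies a per-processor reader-writer lock: every registered processor owns a cache-line-aligned slot (__processor_id), a reader checks the global writer flag and then takes only its own slot, and a writer raises the global flag and then sweeps every slot. The .cfa side that implements ready_mutate_lock/ready_mutate_unlock is not part of this diff, so the following is only a sketch of that general technique in plain C11 atomics, with made-up names (slot, slots, writer_lock, NREADERS):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NREADERS 8

    struct slot { _Alignas(64) atomic_bool lock; };     // one flag per reader, on its own cache line
    static struct slot slots[NREADERS];
    static atomic_bool writer_lock;

    static void reader_enter(unsigned i) {
            // Wait until no writer holds (or is taking) the global flag,
            // then take only our private flag; readers never contend with each other.
            while (atomic_load_explicit(&writer_lock, memory_order_relaxed)) { /* spin */ }
            while (atomic_exchange(&slots[i].lock, true)) { /* spin */ }
    }

    static void reader_exit(unsigned i) {
            atomic_store_explicit(&slots[i].lock, false, memory_order_release);
    }

    static void writer_enter(void) {
            // Exclude other writers, then drain the readers one slot at a time.
            while (atomic_exchange(&writer_lock, true)) { /* spin */ }
            for (unsigned i = 0; i < NREADERS; i++)
                    while (atomic_exchange(&slots[i].lock, true)) { /* spin */ }
    }

    static void writer_exit(void) {
            for (unsigned i = 0; i < NREADERS; i++)
                    atomic_store_explicit(&slots[i].lock, false, memory_order_release);
            atomic_store_explicit(&writer_lock, false, memory_order_release);
    }

Because a reader's fast path is a single uncontended exchange on its own cache line, the common case (scheduling) scales with the number of processors, while the rare case (resizing the set of ready queues) pays the full sweep.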
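
Finally, a hedged sketch of how a caller might bracket the two sides of the API declared above. The schedule_one and grow_queues wrappers and the ready_queue field are hypothetical; only the locking and push calls themselves come from this header.

    // Reader side: taken on every scheduling operation.
    static void schedule_one( struct cluster & cltr, struct processor * proc, thread_desc * thrd ) {
            ready_schedule_lock( cltr, proc );        // waits only while a writer is active
            push( cltr.ready_queue, thrd );           // hypothetical __intrusive_ready_queue_t field
            ready_schedule_unlock( cltr, proc );
    }

    // Writer side: taken only when the set of queues changes.
    static void grow_queues( struct cluster & cltr ) {
            uint_fast32_t last = ready_mutate_lock( cltr );   // excludes every reader
            // ... add or remove __intrusive_ready_queue_t instances here ...
            ready_mutate_unlock( cltr, last );
    }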