Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel_private.hfa

    rc993b15 rfc59df78  
    2525// Scheduler
    2626
     27struct __attribute__((aligned(128))) __scheduler_lock_id_t;
    2728
    2829extern "C" {
    2930        void disable_interrupts() OPTIONAL_THREAD;
    30         void enable_interrupts( bool poll = true );
    31 }
    32 
    33 void schedule_thread$( $thread * ) __attribute__((nonnull (1)));
     31        void enable_interrupts_noPoll();
     32        void enable_interrupts( __cfaabi_dbg_ctx_param );
     33}
     34
     35void __schedule_thread( $thread * )
     36#if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
     37        __attribute__((nonnull (1)))
     38#endif
     39;
    3440
    3541extern bool __preemption_enabled();
     
    7985// Lock-Free registering/unregistering of threads
    8086// Register a processor to a given cluster and get its unique id in return
    81 unsigned register_proc_id( void );
     87void register_proc_id( struct __processor_id_t * );
    8288
    8389// Unregister a processor from a given cluster using its id, getting back the original pointer
    84 void unregister_proc_id( unsigned );
     90void unregister_proc_id( struct __processor_id_t * proc );
    8591
    8692//=======================================================================
     
    111117}
    112118
    113 
    114 
    115 
     119// Cells used by the reader writer lock
     120// while not generic it only relies on an opaque pointer
     121struct __attribute__((aligned(128))) __scheduler_lock_id_t {
     122        // Spin lock used as the underlying lock
     123        volatile bool lock;
     124
     125        // Handle pointing to the proc owning this cell
     126        // Used for allocating cells and debugging
     127        __processor_id_t * volatile handle;
     128
     129        #ifdef __CFA_WITH_VERIFY__
     130                // Debug, check if this is owned for reading
     131                bool owned;
     132        #endif
     133};
     134
     135static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
    116136
    117137//-----------------------------------------------------------------------
     
    132152
    133153        // writer lock
    134         volatile bool write_lock;
     154        volatile bool lock;
    135155
    136156        // data pointer
    137         volatile bool * volatile * data;
     157        __scheduler_lock_id_t * data;
    138158};
    139159
     
    148168static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    149169        /* paranoid */ verify( ! __preemption_enabled() );
    150         /* paranoid */ verify( ! kernelTLS().in_sched_lock );
    151         /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    152         /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
     170        /* paranoid */ verify( kernelTLS().this_proc_id );
     171
     172        unsigned iproc = kernelTLS().this_proc_id->id;
     173        /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
     174        /*paranoid*/ verify(iproc < ready);
    153175
    154176        // Step 1 : make sure no writers are in the middle of the critical section
    155         while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
     177        while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
    156178                Pause();
    157179
     
    162184
    163185        // Step 2 : acquire our local lock
    164         __atomic_acquire( &kernelTLS().sched_lock );
    165         /*paranoid*/ verify(kernelTLS().sched_lock);
     186        __atomic_acquire( &data[iproc].lock );
     187        /*paranoid*/ verify(data[iproc].lock);
    166188
    167189        #ifdef __CFA_WITH_VERIFY__
    168190                // Debug, check if this is owned for reading
    169                 kernelTLS().in_sched_lock = true;
     191                data[iproc].owned = true;
    170192        #endif
    171193}
     
    173195static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    174196        /* paranoid */ verify( ! __preemption_enabled() );
    175         /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
    176         /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
    177         /* paranoid */ verify( kernelTLS().sched_lock );
    178         /* paranoid */ verify( kernelTLS().in_sched_lock );
     197        /* paranoid */ verify( kernelTLS().this_proc_id );
     198
     199        unsigned iproc = kernelTLS().this_proc_id->id;
     200        /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
     201        /*paranoid*/ verify(iproc < ready);
     202        /*paranoid*/ verify(data[iproc].lock);
     203        /*paranoid*/ verify(data[iproc].owned);
    179204        #ifdef __CFA_WITH_VERIFY__
    180205                // Debug, check if this is owned for reading
    181                 kernelTLS().in_sched_lock = false;
     206                data[iproc].owned = false;
    182207        #endif
    183         __atomic_unlock(&kernelTLS().sched_lock);
     208        __atomic_unlock(&data[iproc].lock);
    184209}
    185210
     
    187212        static inline bool ready_schedule_islocked(void) {
    188213                /* paranoid */ verify( ! __preemption_enabled() );
    189                 /* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
    190                 return kernelTLS().sched_lock;
     214                /*paranoid*/ verify( kernelTLS().this_proc_id );
     215                __processor_id_t * proc = kernelTLS().this_proc_id;
     216                return __scheduler_lock->data[proc->id].owned;
    191217        }
    192218
    193219        static inline bool ready_mutate_islocked() {
    194                 return __scheduler_lock->write_lock;
     220                return __scheduler_lock->lock;
    195221        }
    196222#endif
     
    207233// Register a processor to a given cluster and get its unique id in return
    208234// For convenience, also acquires the lock
    209 static inline [unsigned, uint_fast32_t] ready_mutate_register() {
    210         unsigned id = register_proc_id();
    211         uint_fast32_t last = ready_mutate_lock();
    212         return [id, last];
     235static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
     236        register_proc_id( proc );
     237        return ready_mutate_lock();
    213238}
    214239
    215240// Unregister a processor from a given cluster using its id, getting back the original pointer
    216241// assumes the lock is acquired
    217 static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
     242static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
    218243        ready_mutate_unlock( last_s );
    219         unregister_proc_id( id );
     244        unregister_proc_id( proc );
    220245}
    221246
Note: See TracChangeset for help on using the changeset viewer.