Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel_private.hfa

    rfc59df78 rb808625  
    2525// Scheduler
    2626
    27 struct __attribute__((aligned(128))) __scheduler_lock_id_t;
    2827
    2928extern "C" {
    3029        void disable_interrupts() OPTIONAL_THREAD;
    31         void enable_interrupts_noPoll();
    32         void enable_interrupts( __cfaabi_dbg_ctx_param );
    33 }
    34 
    35 void __schedule_thread( $thread * )
    36 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
    37         __attribute__((nonnull (1)))
    38 #endif
    39 ;
     30        void enable_interrupts( bool poll = true );
     31}
     32
     33void schedule_thread$( $thread * ) __attribute__((nonnull (1)));
    4034
    4135extern bool __preemption_enabled();
     
    8579// Lock-Free registering/unregistering of threads
    8680// Register a processor to a given cluster and get its unique id in return
    87 void register_proc_id( struct __processor_id_t * );
     81unsigned register_proc_id( void );
    8882
    8983// Unregister a processor from a given cluster using its id, getting back the original pointer
    90 void unregister_proc_id( struct __processor_id_t * proc );
     84void unregister_proc_id( unsigned );
    9185
    9286//=======================================================================
     
    117111}
    118112
    119 // Cells used by the reader writer lock
    120 // while not generic it only relies on an opaque pointer
    121 struct __attribute__((aligned(128))) __scheduler_lock_id_t {
    122         // Spin lock used as the underlying lock
    123         volatile bool lock;
    124 
    125         // Handle pointing to the proc owning this cell
    126         // Used for allocating cells and debugging
    127         __processor_id_t * volatile handle;
    128 
    129         #ifdef __CFA_WITH_VERIFY__
    130                 // Debug, check if this is owned for reading
    131                 bool owned;
    132         #endif
    133 };
    134 
    135 static_assert( sizeof(struct __scheduler_lock_id_t) <= __alignof(struct __scheduler_lock_id_t));
     113
     114
     115
    136116
    137117//-----------------------------------------------------------------------
     
    152132
    153133        // writer lock
    154         volatile bool lock;
     134        volatile bool write_lock;
    155135
    156136        // data pointer
    157         __scheduler_lock_id_t * data;
     137        volatile bool * volatile * data;
    158138};
    159139
     
    168148static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    169149        /* paranoid */ verify( ! __preemption_enabled() );
    170         /* paranoid */ verify( kernelTLS().this_proc_id );
    171 
    172         unsigned iproc = kernelTLS().this_proc_id->id;
    173         /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
    174         /*paranoid*/ verify(iproc < ready);
     150        /* paranoid */ verify( ! kernelTLS().in_sched_lock );
     151        /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
     152        /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
    175153
    176154        // Step 1 : make sure no writer are in the middle of the critical section
    177         while(__atomic_load_n(&lock, (int)__ATOMIC_RELAXED))
     155        while(__atomic_load_n(&write_lock, (int)__ATOMIC_RELAXED))
    178156                Pause();
    179157
     
    184162
    185163        // Step 2 : acquire our local lock
    186         __atomic_acquire( &data[iproc].lock );
    187         /*paranoid*/ verify(data[iproc].lock);
     164        __atomic_acquire( &kernelTLS().sched_lock );
     165        /*paranoid*/ verify(kernelTLS().sched_lock);
    188166
    189167        #ifdef __CFA_WITH_VERIFY__
    190168                // Debug, check if this is owned for reading
    191                 data[iproc].owned = true;
     169                kernelTLS().in_sched_lock = true;
    192170        #endif
    193171}
     
    195173static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    196174        /* paranoid */ verify( ! __preemption_enabled() );
    197         /* paranoid */ verify( kernelTLS().this_proc_id );
    198 
    199         unsigned iproc = kernelTLS().this_proc_id->id;
    200         /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
    201         /*paranoid*/ verify(iproc < ready);
    202         /*paranoid*/ verify(data[iproc].lock);
    203         /*paranoid*/ verify(data[iproc].owned);
     175        /* paranoid */ verify( data[kernelTLS().sched_id] == &kernelTLS().sched_lock );
     176        /* paranoid */ verify( !kernelTLS().this_processor || kernelTLS().this_processor->unique_id == kernelTLS().sched_id );
     177        /* paranoid */ verify( kernelTLS().sched_lock );
     178        /* paranoid */ verify( kernelTLS().in_sched_lock );
    204179        #ifdef __CFA_WITH_VERIFY__
    205180                // Debug, check if this is owned for reading
    206                 data[iproc].owned = false;
     181                kernelTLS().in_sched_lock = false;
    207182        #endif
    208         __atomic_unlock(&data[iproc].lock);
     183        __atomic_unlock(&kernelTLS().sched_lock);
    209184}
    210185
     
    212187        static inline bool ready_schedule_islocked(void) {
    213188                /* paranoid */ verify( ! __preemption_enabled() );
    214                 /*paranoid*/ verify( kernelTLS().this_proc_id );
    215                 __processor_id_t * proc = kernelTLS().this_proc_id;
    216                 return __scheduler_lock->data[proc->id].owned;
     189                /* paranoid */ verify( (!kernelTLS().in_sched_lock) || kernelTLS().sched_lock );
     190                return kernelTLS().sched_lock;
    217191        }
    218192
    219193        static inline bool ready_mutate_islocked() {
    220                 return __scheduler_lock->lock;
     194                return __scheduler_lock->write_lock;
    221195        }
    222196#endif
     
    233207// Register a processor to a given cluster and get its unique id in return
    234208// For convenience, also acquires the lock
    235 static inline uint_fast32_t ready_mutate_register( struct __processor_id_t * proc ) {
    236         register_proc_id( proc );
    237         return ready_mutate_lock();
     209static inline [unsigned, uint_fast32_t] ready_mutate_register() {
     210        unsigned id = register_proc_id();
     211        uint_fast32_t last = ready_mutate_lock();
     212        return [id, last];
    238213}
    239214
    240215// Unregister a processor from a given cluster using its id, getting back the original pointer
    241216// assumes the lock is acquired
    242 static inline void ready_mutate_unregister( struct __processor_id_t * proc, uint_fast32_t last_s ) {
     217static inline void ready_mutate_unregister( unsigned id, uint_fast32_t last_s ) {
    243218        ready_mutate_unlock( last_s );
    244         unregister_proc_id( proc );
     219        unregister_proc_id( id );
    245220}
    246221
     
    286261// push thread onto a ready queue for a cluster
    287262// returns true if the list was previously empty, false otherwise
    288 __attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd);
     263__attribute__((hot)) void push(struct cluster * cltr, struct $thread * thrd, bool local);
    289264
    290265//-----------------------------------------------------------------------
Note: See TracChangeset for help on using the changeset viewer.