Ignore:
File:
1 edited

Legend:

Unmodified
Added
Removed
  • libcfa/src/concurrency/kernel_private.hfa

    r8fc652e0 r6a77224  
    3333}
    3434
    35 void __schedule_thread( $thread * )
     35void __schedule_thread( struct __processor_id_t *, $thread * )
    3636#if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
    37         __attribute__((nonnull (1)))
     37        __attribute__((nonnull (2)))
    3838#endif
    3939;
    40 
    41 extern bool __preemption_enabled();
    4240
    4341//release/wake-up the following resources
     
    6563)
    6664
     65// KERNEL ONLY unpark without disabling interrupts
     66void __unpark( struct __processor_id_t *, $thread * thrd );
     67
    6768#define TICKET_BLOCKED (-1) // thread is blocked
    6869#define TICKET_RUNNING ( 0) // thread is running
    6970#define TICKET_UNBLOCK ( 1) // thread should ignore next block
     71
     // Post on a single_sem: wake the parked thread if one is present,
     // otherwise record the post so a later wait does not block.
     // Returns true iff a thread was handed to __unpark, false otherwise.
     // NOTE(review): ptr encoding inferred from the branches below — confirm
     //   against single_sem's definition: 0p = empty, 1p = already posted,
     //   any other value = pointer to the parked $thread.
     72static inline bool __post(single_sem & this, struct __processor_id_t * id) {
     73        for() {
     74                struct $thread * expected = this.ptr;
     75                if(expected == 1p) return false;
     76                if(expected == 0p) {
     77                        if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
     78                                return false;
     79                        }
     80                }
     81                else {
     82                        if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
     83                                __unpark( id, expected );
     84                                return true;
     85                        }
     86                }
     87        }
     88}
    7089
    7190//-----------------------------------------------------------------------------
     
    182201// Reader side : acquire when using the ready queue to schedule but not
    183202//  creating/destroying queues
    184 static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
    185         /* paranoid */ verify( ! __preemption_enabled() );
    186         /* paranoid */ verify( kernelTLS().this_proc_id );
    187 
    188         unsigned iproc = kernelTLS().this_proc_id->id;
    189         /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
     203static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
     204        unsigned iproc = proc->id;
     205        /*paranoid*/ verify(data[iproc].handle == proc);
    190206        /*paranoid*/ verify(iproc < ready);
    191207
     
    209225}
    210226
    211 static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
    212         /* paranoid */ verify( ! __preemption_enabled() );
    213         /* paranoid */ verify( kernelTLS().this_proc_id );
    214 
    215         unsigned iproc = kernelTLS().this_proc_id->id;
    216         /*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
     227static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
     228        unsigned iproc = proc->id;
     229        /*paranoid*/ verify(data[iproc].handle == proc);
    217230        /*paranoid*/ verify(iproc < ready);
    218231        /*paranoid*/ verify(data[iproc].lock);
     
    226239
    227240#ifdef __CFA_WITH_VERIFY__
    228         static inline bool ready_schedule_islocked(void) {
    229                 /* paranoid */ verify( ! __preemption_enabled() );
    230                 /*paranoid*/ verify( kernelTLS().this_proc_id );
    231                 __processor_id_t * proc = kernelTLS().this_proc_id;
     241        static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
    232242                return __scheduler_lock->data[proc->id].owned;
    233243        }
Note: See TracChangeset for help on using the changeset viewer.