Changeset: r6a77224 (diff against r8fc652e0)

Files:
- 1 edited

Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/kernel_private.hfa
Diff r8fc652e0 → r6a77224 of libcfa/src/concurrency/kernel_private.hfa:

@@ old lines 33-43 / new lines 33-41 @@
 }

-void __schedule_thread( $thread * )
+void __schedule_thread( struct __processor_id_t *, $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (1)))
+	__attribute__((nonnull (2)))
 #endif
 ;

-extern bool __preemption_enabled();
-
 //release/wake-up the following resources

@@ old lines 65-71 / new lines 63-90 @@
 )

+// KERNEL ONLY unpark with out disabling interrupts
+void __unpark( struct __processor_id_t *, $thread * thrd );
+
 #define TICKET_BLOCKED (-1) // thread is blocked
 #define TICKET_RUNNING ( 0) // thread is running
 #define TICKET_UNBLOCK ( 1) // thread should ignore next block
+
+static inline bool __post(single_sem & this, struct __processor_id_t * id) {
+	for() {
+		struct $thread * expected = this.ptr;
+		if(expected == 1p) return false;
+		if(expected == 0p) {
+			if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+				return false;
+			}
+		}
+		else {
+			if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+				__unpark( id, expected );
+				return true;
+			}
+		}
+	}
+}

 //-----------------------------------------------------------------------------

@@ old lines 182-190 / new lines 201-206 @@
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_proc_id );
-
-	unsigned iproc = kernelTLS().this_proc_id->id;
-	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
+static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
+	unsigned iproc = proc->id;
+	/*paranoid*/ verify(data[iproc].handle == proc);
 	/*paranoid*/ verify(iproc < ready);

@@ old lines 209-218 / new lines 225-231 @@
 }

-static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( kernelTLS().this_proc_id );
-
-	unsigned iproc = kernelTLS().this_proc_id->id;
-	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
+static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
+	unsigned iproc = proc->id;
+	/*paranoid*/ verify(data[iproc].handle == proc);
 	/*paranoid*/ verify(iproc < ready);
 	/*paranoid*/ verify(data[iproc].lock);

@@ old lines 226-233 / new lines 239-243 @@

 #ifdef __CFA_WITH_VERIFY__
-static inline bool ready_schedule_islocked(void) {
-	/* paranoid */ verify( ! __preemption_enabled() );
-	/*paranoid*/ verify( kernelTLS().this_proc_id );
-	__processor_id_t * proc = kernelTLS().this_proc_id;
+static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
 	return __scheduler_lock->data[proc->id].owned;
 }
Note: See TracChangeset
for help on using the changeset viewer.