Changeset e873838 for libcfa/src/concurrency/kernel_private.hfa
- Timestamp:
- Nov 2, 2020, 12:44:43 PM (4 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 58688bf, 82f791f
- Parents:
- f7136f7
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel_private.hfa
--- libcfa/src/concurrency/kernel_private.hfa (rf7136f7)
+++ libcfa/src/concurrency/kernel_private.hfa (re873838)
@@ old line 33 / new line 33 @@
 }
 
-void __schedule_thread( struct __processor_id_t *, $thread * )
+void __schedule_thread( $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (2)))
+	__attribute__((nonnull (1)))
 #endif
 ;
@@ old line 63 / new line 63 @@
 )
 
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd );
-
 #define TICKET_BLOCKED (-1) // thread is blocked
 #define TICKET_RUNNING ( 0) // thread is running
 #define TICKET_UNBLOCK ( 1) // thread should ignore next block
-
-static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-	for() {
-		struct $thread * expected = this.ptr;
-		if(expected == 1p) return false;
-		if(expected == 0p) {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				return false;
-			}
-		}
-		else {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				__unpark( id, expected );
-				return true;
-			}
-		}
-	}
-}
 
 //-----------------------------------------------------------------------------
@@ old line 201 / new line 180 @@
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+
+	unsigned iproc = kernelTLS.this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 
@@ old line 225 / new line 206 @@
 }
 
-static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+
+	unsigned iproc = kernelTLS.this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS.this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 	/*paranoid*/ verify(data[iproc].lock);
@@ old line 239 / new line 222 @@
 
 #ifdef __CFA_WITH_VERIFY__
-static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+static inline bool ready_schedule_islocked(void) {
+	/*paranoid*/ verify( kernelTLS.this_proc_id );
+	__processor_id_t * proc = kernelTLS.this_proc_id;
 	return __scheduler_lock->data[proc->id].owned;
 }
Note: See TracChangeset for help on using the changeset viewer.