Changeset 139775e for libcfa/src/concurrency/kernel_private.hfa
- Timestamp:
- Nov 6, 2020, 4:48:52 PM (3 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 75baaa3
- Parents:
- 55acc3a (diff), 836c9925 (diff)
Note: this is a merge changeset, the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/concurrency/kernel_private.hfa
--- r55acc3a
+++ r139775e
@@ -33,8 +33,10 @@
 	}
 
-void __schedule_thread( struct __processor_id_t *, $thread * )
+void __schedule_thread( $thread * )
 #if defined(NDEBUG) || (!defined(__CFA_DEBUG__) && !defined(__CFA_VERIFY__))
-	__attribute__((nonnull (2)))
+	__attribute__((nonnull (1)))
 #endif
 ;
+
+extern bool __preemption_enabled();
 
 //release/wake-up the following resources
@@ -63,27 +65,6 @@
 )
 
-// KERNEL ONLY unpark with out disabling interrupts
-void __unpark( struct __processor_id_t *, $thread * thrd );
-
 #define TICKET_BLOCKED (-1) // thread is blocked
 #define TICKET_RUNNING ( 0) // thread is running
 #define TICKET_UNBLOCK ( 1) // thread should ignore next block
-
-static inline bool __post(single_sem & this, struct __processor_id_t * id) {
-	for() {
-		struct $thread * expected = this.ptr;
-		if(expected == 1p) return false;
-		if(expected == 0p) {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				return false;
-			}
-		}
-		else {
-			if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-				__unpark( id, expected );
-				return true;
-			}
-		}
-	}
-}
 
 //-----------------------------------------------------------------------------
@@ -201,7 +182,10 @@
 // Reader side : acquire when using the ready queue to schedule but not
 // creating/destroying queues
-static inline void ready_schedule_lock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_lock(void) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+
+	unsigned iproc = kernelTLS().this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
@@ -225,8 +209,11 @@
 }
 
-static inline void ready_schedule_unlock( struct __processor_id_t * proc) with(*__scheduler_lock) {
-	unsigned iproc = proc->id;
-	/*paranoid*/ verify(data[iproc].handle == proc);
+static inline void ready_schedule_unlock(void) with(*__scheduler_lock) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/* paranoid */ verify( kernelTLS().this_proc_id );
+
+	unsigned iproc = kernelTLS().this_proc_id->id;
+	/*paranoid*/ verify(data[iproc].handle == kernelTLS().this_proc_id);
 	/*paranoid*/ verify(iproc < ready);
 	/*paranoid*/ verify(data[iproc].lock);
@@ -239,5 +226,8 @@
 
 #ifdef __CFA_WITH_VERIFY__
-static inline bool ready_schedule_islocked( struct __processor_id_t * proc) {
+static inline bool ready_schedule_islocked(void) {
+	/* paranoid */ verify( ! __preemption_enabled() );
+	/*paranoid*/ verify( kernelTLS().this_proc_id );
+	__processor_id_t * proc = kernelTLS().this_proc_id;
 	return __scheduler_lock->data[proc->id].owned;
 }
Note: See TracChangeset
for help on using the changeset viewer.