Files changed: 1 file edited
libcfa/src/concurrency/ready_queue.cfa (modified) (6 diffs)
Legend:
- Unmodified
- Added
- Removed
libcfa/src/concurrency/ready_queue.cfa
r1eb239e4 r62502cc4 150 150 // queues or removing them. 151 151 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) { 152 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 153 152 154 // Step 1 : lock global lock 153 155 // It is needed to avoid processors that register mid Critical-Section … … 164 166 } 165 167 168 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 166 169 return s; 167 170 } 168 171 169 172 void ready_mutate_unlock( uint_fast32_t last_s ) with(*__scheduler_lock) { 173 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 174 170 175 // Step 1 : release local locks 171 176 // This must be done while the global lock is held to avoid … … 182 187 /*paranoid*/ assert(true == lock); 183 188 __atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE); 189 190 /* paranoid */ verify( ! kernelTLS.preemption_state.enabled ); 184 191 } 185 192 … … 419 426 // Actually pop the list 420 427 struct $thread * thrd; 421 bool emptied; 422 [thrd, emptied] = pop(lane); 428 thrd = pop(lane); 423 429 424 430 /* paranoid */ verify(thrd); … … 457 463 if(head(lane)->link.next == thrd) { 458 464 $thread * pthrd; 459 bool emptied; 460 [pthrd, emptied] = pop(lane); 465 pthrd = pop(lane); 461 466 462 467 /* paranoid */ verify( pthrd == thrd ); … … 608 613 while(!is_empty(lanes.data[idx])) { 609 614 struct $thread * thrd; 610 __attribute__((unused)) bool _; 611 [thrd, _] = pop(lanes.data[idx]); 615 thrd = pop(lanes.data[idx]); 612 616 613 617 push(cltr, thrd);
Note: See TracChangeset for help on using the changeset viewer.