File: 1 edited
libcfa/src/concurrency/ready_queue.cfa (modified) (8 diffs)
Legend: unchanged lines carry no prefix; added lines are prefixed with "+", removed lines with "-".
libcfa/src/concurrency/ready_queue.cfa
--- libcfa/src/concurrency/ready_queue.cfa	(rc993b15)
+++ libcfa/src/concurrency/ready_queue.cfa	(rfc59df78)
@@ -93,7 +93,9 @@
 	this.alloc = 0;
 	this.ready = 0;
+	this.lock  = false;
 	this.data  = alloc(this.max);
-	this.write_lock  = false;
 
+	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data    )) % 64) );
+	/*paranoid*/ verify( 0 == (((uintptr_t)(this.data + 1)) % 64) );
 	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.alloc), &this.alloc));
 	/*paranoid*/ verify(__atomic_is_lock_free(sizeof(this.ready), &this.ready));
@@ -104,10 +106,16 @@
 }
 
+void ?{}( __scheduler_lock_id_t & this, __processor_id_t * proc ) {
+	this.handle = proc;
+	this.lock   = false;
+	#ifdef __CFA_WITH_VERIFY__
+		this.owned  = false;
+	#endif
+}
 
 //=======================================================================
 // Lock-Free registering/unregistering of threads
-unsigned register_proc_id( void ) with(*__scheduler_lock) {
+void register_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
 	__cfadbg_print_safe(ready_queue, "Kernel : Registering proc %p for RW-Lock\n", proc);
-	bool * handle = (bool *)&kernelTLS().sched_lock;
 
 	// Step - 1 : check if there is already space in the data
@@ -116,13 +124,11 @@
 	// Check among all the ready
 	for(uint_fast32_t i = 0; i < s; i++) {
-		bool * volatile * cell = (bool * volatile *)&data[i]; // Cforall is bugged and the double volatiles causes problems
-		/* paranoid */ verify( handle != *cell );
-
-		bool * null = 0p; // Re-write every loop since compare thrashes it
-		if( __atomic_load_n(cell, (int)__ATOMIC_RELAXED) == null
-			&& __atomic_compare_exchange_n( cell, &null, handle, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
-			/* paranoid */ verify(i < ready);
-			/* paranoid */ verify( (kernelTLS().sched_id = i, true) );
-			return i;
+		__processor_id_t * null = 0p; // Re-write every loop since compare thrashes it
+		if( __atomic_load_n(&data[i].handle, (int)__ATOMIC_RELAXED) == null
+			&& __atomic_compare_exchange_n( &data[i].handle, &null, proc, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+			/*paranoid*/ verify(i < ready);
+			/*paranoid*/ verify(0 == (__alignof__(data[i]) % cache_line_size));
+			/*paranoid*/ verify((((uintptr_t)&data[i]) % cache_line_size) == 0);
+			proc->id = i;
 		}
 	}
@@ -135,5 +141,6 @@
 
 	// Step - 3 : Mark space as used and then publish it.
-	data[n] = handle;
+	__scheduler_lock_id_t * storage = (__scheduler_lock_id_t *)&data[n];
+	(*storage){ proc };
 	while() {
 		unsigned copy = n;
@@ -147,17 +154,15 @@
 
 	// Return new spot.
-	/* paranoid */ verify(n < ready);
-	/* paranoid */ verify( (kernelTLS().sched_id = n, true) );
-	return n;
-}
-
-void unregister_proc_id( unsigned id ) with(*__scheduler_lock) {
-	/* paranoid */ verify(id < ready);
-	/* paranoid */ verify(id == kernelTLS().sched_id);
-	/* paranoid */ verify(data[id] == &kernelTLS().sched_lock);
-
-	bool * volatile * cell = (bool * volatile *)&data[id]; // Cforall is bugged and the double volatiles causes problems
-
-	__atomic_store_n(cell, 0p, __ATOMIC_RELEASE);
+	/*paranoid*/ verify(n < ready);
+	/*paranoid*/ verify(__alignof__(data[n]) == (2 * cache_line_size));
+	/*paranoid*/ verify((((uintptr_t)&data[n]) % cache_line_size) == 0);
+	proc->id = n;
+}
+
+void unregister_proc_id( struct __processor_id_t * proc ) with(*__scheduler_lock) {
+	unsigned id = proc->id;
+	/*paranoid*/ verify(id < ready);
+	/*paranoid*/ verify(proc == __atomic_load_n(&data[id].handle, __ATOMIC_RELAXED));
+	__atomic_store_n(&data[id].handle, 0p, __ATOMIC_RELEASE);
 
 	__cfadbg_print_safe(ready_queue, "Kernel : Unregister proc %p\n", proc);
@@ -169,10 +174,9 @@
 uint_fast32_t ready_mutate_lock( void ) with(*__scheduler_lock) {
 	/* paranoid */ verify( ! __preemption_enabled() );
-	/* paranoid */ verify( ! kernelTLS().sched_lock );
 
 	// Step 1 : lock global lock
 	// It is needed to avoid processors that register mid Critical-Section
 	// to simply lock their own lock and enter.
-	__atomic_acquire( &write_lock );
+	__atomic_acquire( &lock );
 
 	// Step 2 : lock per-proc lock
@@ -182,6 +186,5 @@
 	uint_fast32_t s = ready;
 	for(uint_fast32_t i = 0; i < s; i++) {
-		volatile bool * llock = data[i];
-		if(llock) __atomic_acquire( llock );
+		__atomic_acquire( &data[i].lock );
 	}
 
@@ -200,11 +203,11 @@
 	// Alternative solution : return s in write_lock and pass it to write_unlock
 	for(uint_fast32_t i = 0; i < last_s; i++) {
-		volatile bool * llock = data[i];
-		if(llock) __atomic_store_n(llock, (bool)false, __ATOMIC_RELEASE);
+		verify(data[i].lock);
+		__atomic_store_n(&data[i].lock, (bool)false, __ATOMIC_RELEASE);
 	}
 
 	// Step 2 : release global lock
-	/*paranoid*/ assert(true == write_lock);
-	__atomic_store_n(&write_lock, (bool)false, __ATOMIC_RELEASE);
+	/*paranoid*/ assert(true == lock);
+	__atomic_store_n(&lock, (bool)false, __ATOMIC_RELEASE);
 
 	/* paranoid */ verify( ! __preemption_enabled() );
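Taken together, the right-hand (rfc59df78) side of the diff keeps one cache-line-aligned __scheduler_lock_id_t slot per registered processor, each holding the processor's handle and its own reader lock, while the global lock field only serializes writers and processors that are mid-registration. Below is a minimal C sketch of that reader/writer protocol, assuming 64-byte cache lines and GCC's __atomic builtins; the names id_slot, rw_lock and spin_acquire are illustrative stand-ins for the Cforall definitions (including __atomic_acquire), not the actual runtime code.

#include <stdbool.h>

struct id_slot {                          // assumed shape of __scheduler_lock_id_t
	void * volatile handle;               // registering processor, null when the slot is free
	volatile bool lock;                   // per-processor reader lock
} __attribute__((aligned(128)));          // 2 * cache_line_size, per the paranoid checks (64-byte line assumed)

struct rw_lock {                          // assumed shape of the scheduler RW-lock
	volatile bool lock;                   // global writer lock
	volatile unsigned ready;              // number of published slots
	struct id_slot * data;                // cache-line-aligned slot array
};

// Spin until the flag is owned (illustrative stand-in for __atomic_acquire).
static void spin_acquire(volatile bool * flag) {
	while (__atomic_exchange_n(flag, true, __ATOMIC_ACQUIRE)) {
		while (__atomic_load_n(flag, __ATOMIC_RELAXED)) {} // back off on a plain load while held
	}
}

// Reader side: a registered processor only ever touches its own slot.
static void read_lock(struct rw_lock * rw, unsigned id) {
	spin_acquire(&rw->data[id].lock);
}

// Writer side (ready_mutate_lock): take the global lock first so no new
// processor can register mid critical-section, then exclude every reader.
static void write_lock(struct rw_lock * rw) {
	spin_acquire(&rw->lock);                        // Step 1 : lock global lock
	unsigned s = rw->ready;
	for (unsigned i = 0; i < s; i++)                // Step 2 : lock per-proc lock
		spin_acquire(&rw->data[i].lock);
}

In the common path a processor therefore spins only on a flag in its own cache line, while the writer pays the full O(ready) cost, matching the per-processor loops in the lock and unlock paths shown above.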