Changeset d30e3eb
- Timestamp: Mar 24, 2023, 4:44:46 PM (2 years ago)
- Branches: ADT, ast-experimental, master
- Children: 1633e04
- Parents: de934c7
- Location: libcfa/src/concurrency
- Files: 2 edited
libcfa/src/concurrency/channel.hfa (de934c7 → d30e3eb)
  #include <locks.hfa>
-
- struct no_reacq_lock {
-     inline exp_backoff_then_block_lock;
- };
-
- // have to override these by hand to get around plan 9 inheritance bug where resolver can't find the appropriate routine to call
- static inline void ?{}( no_reacq_lock & this ) { ((exp_backoff_then_block_lock &)this){}; }
- static inline bool try_lock(no_reacq_lock & this) { return try_lock(((exp_backoff_then_block_lock &)this)); }
- static inline void lock(no_reacq_lock & this) { lock(((exp_backoff_then_block_lock &)this)); }
- static inline void unlock(no_reacq_lock & this) { unlock(((exp_backoff_then_block_lock &)this)); }
- static inline void on_notify(no_reacq_lock & this, struct thread$ * t ) { on_notify(((exp_backoff_then_block_lock &)this), t); }
- static inline size_t on_wait(no_reacq_lock & this) { return on_wait(((exp_backoff_then_block_lock &)this)); }
- // override wakeup so that we don't reacquire the lock if using a condvar
- static inline void on_wakeup( no_reacq_lock & this, size_t recursion ) {}
-
- #define __PREVENTION_CHANNEL
+ #include <list.hfa>
+
+ // #define __PREVENTION_CHANNEL
  #ifdef __PREVENTION_CHANNEL
  forall( T ) {
  struct channel {
-     size_t size;
-     size_t front, back, count;
+     size_t size, count, front, back;
      T * buffer;
      thread$ * chair;
…
          return;
      }
-     else insert_( chan, elem );
+     insert_( chan, elem );

      unlock( mutex_lock );
…

      // wait if buffer is empty, work will be completed by someone else
      if ( count == 0 ) {
          chair = active_thread();
          chair_elem = &retval;
…
      memcpy((void *)&retval, (void *)&buffer[front], sizeof(T));
      count -= 1;
-     front = (front + 1) % size;
+     front++;
+     if ( front == size ) front = 0;

      if ( chair != 0p ) {
…

  #ifndef __PREVENTION_CHANNEL
+
+ // link field used for threads waiting on channel
+ struct wait_link {
+     // used to put wait_link on a dl queue
+     inline dlink(wait_link);
+
+     // waiting thread
+     struct thread$ * t;
+
+     // shadow field
+     void * elem;
+ };
+ P9_EMBEDDED( wait_link, dlink(wait_link) )
+
+ static inline void ?{}( wait_link & this, thread$ * t, void * elem ) {
+     this.t = t;
+     this.elem = elem;
+ }
+
  forall( T ) {
+
  struct channel {
      size_t size;
      size_t front, back, count;
      T * buffer;
-     fast_cond_var( no_reacq_lock ) prods, cons;
-     no_reacq_lock mutex_lock;
+     dlist( wait_link ) prods, cons;
+     exp_backoff_then_block_lock mutex_lock;
  };
…
  static inline size_t get_count( channel(T) & chan ) with(chan) { return count; }
  static inline size_t get_size( channel(T) & chan ) with(chan) { return size; }
- static inline bool has_waiters( channel(T) & chan ) with(chan) { return !empty( cons ) || !empty( prods ); }
- static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !empty( cons ); }
- static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !empty( prods ); }
+ static inline bool has_waiters( channel(T) & chan ) with(chan) { return !cons`isEmpty || !prods`isEmpty; }
+ static inline bool has_waiting_consumers( channel(T) & chan ) with(chan) { return !cons`isEmpty; }
+ static inline bool has_waiting_producers( channel(T) & chan ) with(chan) { return !prods`isEmpty; }

  static inline void insert_( channel(T) & chan, T & elem ) with(chan) {
…
  }

+ static inline void wake_one( dlist( wait_link ) & queue ) {
+     wait_link & popped = try_pop_front( queue );
+     unpark( popped.t );
+ }
+
+ static inline void block( dlist( wait_link ) & queue, void * elem_ptr, exp_backoff_then_block_lock & lock ) {
+     wait_link w{ active_thread(), elem_ptr };
+     insert_last( queue, w );
+     unlock( lock );
+     park();
+ }

  static inline void insert( channel(T) & chan, T elem ) with(chan) {
…

      // have to check for the zero size channel case
-     if ( size == 0 && !empty( cons ) ) {
-         memcpy( (void *)front( cons ), (void *)&elem, sizeof(T) );
-         notify_one( cons );
+     if ( size == 0 && !cons`isEmpty ) {
+         memcpy(cons`first.elem, (void *)&elem, sizeof(T));
+         wake_one( cons );
          unlock( mutex_lock );
          return;
      }

      // wait if buffer is full, work will be completed by someone else
      if ( count == size ) {
-         wait( prods, mutex_lock, (uintptr_t)&elem );
+         block( prods, &elem, mutex_lock );
          return;
      } // if

-     if ( count == 0 && !empty( cons ) )
-         // do waiting consumer work
-         memcpy((void *)front( cons ), (void *)&elem, sizeof(T));
-     else insert_( chan, elem );
+     if ( count == 0 && !cons`isEmpty ) {
+         memcpy(cons`first.elem, (void *)&elem, sizeof(T)); // do waiting consumer work
+         wake_one( cons );
+     } else insert_( chan, elem );

-     notify_one( cons );
      unlock( mutex_lock );
  }
…

      // have to check for the zero size channel case
-     if ( size == 0 && !empty( prods ) ) {
-         memcpy((void *)&retval, (void *)front( prods ), sizeof(T));
-         notify_one( prods );
+     if ( size == 0 && !prods`isEmpty ) {
+         memcpy((void *)&retval, (void *)prods`first.elem, sizeof(T));
+         wake_one( prods );
          unlock( mutex_lock );
          return retval;
      }

      // wait if buffer is empty, work will be completed by someone else
      if (count == 0) {
-         wait( cons, mutex_lock, (uintptr_t)&retval );
+         block( cons, &retval, mutex_lock );
          return retval;
      }
…
      front = (front + 1) % size;

-     if (count == size - 1 && !empty( prods ) )
-         insert_( chan, *((T *)front( prods )) ); // do waiting producer work
-
-     notify_one( prods );
+     if (count == size - 1 && !prods`isEmpty ) {
+         insert_( chan, *(T *)prods`first.elem ); // do waiting producer work
+         wake_one( prods );
+     }
+
      unlock( mutex_lock );
      return retval;
  }
-
  } // forall( T )
  #endif
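The rewrite above replaces the fast_cond_var/no_reacq_lock waiting machinery with an intrusive dlist of wait_link nodes: a blocked thread leaves a pointer to its element (the "shadow field") on the queue, and its counterpart completes the data transfer with memcpy before unparking it, so the woken thread never has to reacquire mutex_lock. For context, here is a minimal sketch of driving this interface from user code, assuming the channel constructor takes a buffer capacity and the dequeue routine whose body appears above is named remove; neither signature is shown in this diff:

#include <channel.hfa>
#include <fstream.hfa>
#include <thread.hfa>

channel( int ) chan{ 5 };              // assumed: constructor taking buffer capacity

thread Producer {};
void main( Producer & ) {
    for ( i; 10 )
        insert( chan, i );             // parks via block( prods, ... ) when the buffer is full
}

int main() {
    Producer p;                        // start a producer thread
    for ( i; 10 )
        sout | remove( chan );         // parks via block( cons, ... ) when the buffer is empty
}                                      // producer joined when p goes out of scope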
libcfa/src/concurrency/locks.hfa (de934c7 → d30e3eb)
  static inline void on_wakeup(clh_lock & this, size_t recursion ) { lock(this); }

-
  //-----------------------------------------------------------------------------
  // Exponential backoff then block lock
…
      this.lock_value = 0;
  }
- static inline void ^?{}( exp_backoff_then_block_lock & this ) {}
- // static inline void ?{}( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;
- // static inline void ?=?( exp_backoff_then_block_lock & this, exp_backoff_then_block_lock this2 ) = void;

  static inline bool internal_try_lock(exp_backoff_then_block_lock & this, size_t & compare_val) with(this) {
-     if (__atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
-         return true;
-     }
-     return false;
+     return __atomic_compare_exchange_n(&lock_value, &compare_val, 1, false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
  }
…

  static inline bool try_lock_contention(exp_backoff_then_block_lock & this) with(this) {
-     if (__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE) == 0) {
-         return true;
-     }
-     return false;
+     return !__atomic_exchange_n(&lock_value, 2, __ATOMIC_ACQUIRE);
  }

  static inline bool block(exp_backoff_then_block_lock & this) with(this) {
-     lock( spinlock __cfaabi_dbg_ctx2 ); // TODO change to lockfree queue (MPSC)
-     if (lock_value != 2) {
+     lock( spinlock __cfaabi_dbg_ctx2 );
+     if (__atomic_load_n( &lock_value, __ATOMIC_SEQ_CST) != 2) {
          unlock( spinlock );
          return true;
      }
      insert_last( blocked_threads, *active_thread() );
      unlock( spinlock );
      park( );
      return true;
…
      size_t compare_val = 0;
      int spin = 4;
+
      // linear backoff
      for( ;; ) {
…
  static inline void unlock(exp_backoff_then_block_lock & this) with(this) {
      if (__atomic_exchange_n(&lock_value, 0, __ATOMIC_RELEASE) == 1) return;
      lock( spinlock __cfaabi_dbg_ctx2 );
      thread$ * t = &try_pop_front( blocked_threads );
      unlock( spinlock );
      unpark( t );
  }
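The simplifications above make the lock_value protocol easier to read: 0 means free, 1 means held with no contention, and 2 means held with possible waiters, which is why unlock() can skip the wakeup path entirely when the exchange returns 1. A stand-alone sketch of that acquire/release skeleton in plain C atomics, with the backoff, queueing, and park/unpark details elided; lock_sketch and unlock_sketch are illustrative names, not the CFA API:

#include <stddef.h>

static size_t lock_value = 0;  // 0 = free, 1 = held uncontended, 2 = held + possible waiters

static void lock_sketch( void ) {
    size_t expected = 0;
    // fast path: CAS 0 -> 1, the same shape as internal_try_lock() above
    if ( __atomic_compare_exchange_n( &lock_value, &expected, 1, 0,
                                      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED ) ) return;
    // slow path: advertise possible waiters by writing 2, as try_lock_contention() does;
    // a nonzero previous value means the lock was held, so back off / park and retry
    while ( __atomic_exchange_n( &lock_value, 2, __ATOMIC_ACQUIRE ) != 0 ) {
        /* spin with backoff, then queue on blocked_threads and park() -- elided */
    }
}

static void unlock_sketch( void ) {
    // previous value 1 => no thread ever advertised waiting, so no wakeup is needed
    if ( __atomic_exchange_n( &lock_value, 0, __ATOMIC_RELEASE ) == 1 ) return;
    /* previous value 2 => pop one thread from blocked_threads and unpark it -- elided */
}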