Changeset eef8dfb for libcfa/src/bits/locks.hfa
- Timestamp:
- Jan 7, 2021, 2:55:57 PM (5 years ago)
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 58fe85a
- Parents:
- bdfc032 (diff), 44e37ef (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff)
links above to see all the changes relative to each parent. - File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/bits/locks.hfa
rbdfc032 reef8dfb 10 10 // Created On : Tue Oct 31 15:14:38 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Sat Aug 11 15:42:24 201813 // Update Count : 1 012 // Last Modified On : Wed Aug 12 14:18:07 2020 13 // Update Count : 13 14 14 // 15 15 … … 27 27 28 28 // pause to prevent excess processor bus usage 29 #if defined( __sparc ) 30 #define Pause() __asm__ __volatile__ ( "rd %ccr,%g0" ) 31 #elif defined( __i386 ) || defined( __x86_64 ) 29 #if defined( __i386 ) || defined( __x86_64 ) 32 30 #define Pause() __asm__ __volatile__ ( "pause" : : : ) 33 31 #elif defined( __ARM_ARCH ) 34 #define Pause() __asm__ __volatile__ ( " nop" : : : )32 #define Pause() __asm__ __volatile__ ( "YIELD" : : : ) 35 33 #else 36 34 #error unsupported architecture … … 54 52 55 53 #ifdef __CFA_DEBUG__ 56 void __cfaabi_dbg_record (__spinlock_t & this, const char * prev_name);54 void __cfaabi_dbg_record_lock(__spinlock_t & this, const char prev_name[]); 57 55 #else 58 #define __cfaabi_dbg_record (x, y)56 #define __cfaabi_dbg_record_lock(x, y) 59 57 #endif 60 58 } 61 62 extern void yield( unsigned int );63 59 64 60 static inline void ?{}( __spinlock_t & this ) { … … 68 64 // Lock the spinlock, return false if already acquired 69 65 static inline bool try_lock ( __spinlock_t & this __cfaabi_dbg_ctx_param2 ) { 66 disable_interrupts(); 70 67 bool result = (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0); 71 68 if( result ) { 72 disable_interrupts(); 73 __cfaabi_dbg_record( this, caller ); 69 __cfaabi_dbg_record_lock( this, caller ); 70 } else { 71 enable_interrupts_noPoll(); 74 72 } 75 73 return result; … … 83 81 #endif 84 82 83 disable_interrupts(); 85 84 for ( unsigned int i = 1;; i += 1 ) { 86 85 if ( (this.lock == 0) && (__atomic_test_and_set( &this.lock, __ATOMIC_ACQUIRE ) == 0) ) break; … … 98 97 #endif 99 98 } 100 disable_interrupts(); 101 __cfaabi_dbg_record( this, caller ); 99 __cfaabi_dbg_record_lock( this, caller ); 102 100 } 103 
101 104 102 static inline void unlock( __spinlock_t & this ) { 103 __atomic_clear( &this.lock, __ATOMIC_RELEASE ); 105 104 enable_interrupts_noPoll(); 106 __atomic_clear( &this.lock, __ATOMIC_RELEASE );107 105 } 108 106 … … 112 110 #endif 113 111 112 extern "C" { 113 char * strerror(int); 114 } 115 #define CHECKED(x) { int err = x; if( err != 0 ) abort("KERNEL ERROR: Operation \"" #x "\" return error %d - %s\n", err, strerror(err)); } 116 114 117 struct __bin_sem_t { 115 bool signaled;116 118 pthread_mutex_t lock; 117 119 pthread_cond_t cond; 120 int val; 118 121 }; 119 122 120 123 static inline void ?{}(__bin_sem_t & this) with( this ) { 121 signaled = false; 122 pthread_mutex_init(&lock, NULL); 123 pthread_cond_init (&cond, NULL); 124 // Create the mutex with error checking 125 pthread_mutexattr_t mattr; 126 pthread_mutexattr_init( &mattr ); 127 pthread_mutexattr_settype( &mattr, PTHREAD_MUTEX_ERRORCHECK_NP); 128 pthread_mutex_init(&lock, &mattr); 129 130 pthread_cond_init (&cond, (const pthread_condattr_t *)0p); // workaround trac#208: cast should not be required 131 val = 0; 124 132 } 125 133 126 134 static inline void ^?{}(__bin_sem_t & this) with( this ) { 127 pthread_mutex_destroy(&lock);128 pthread_cond_destroy (&cond);135 CHECKED( pthread_mutex_destroy(&lock) ); 136 CHECKED( pthread_cond_destroy (&cond) ); 129 137 } 130 138 131 139 static inline void wait(__bin_sem_t & this) with( this ) { 132 140 verify(__cfaabi_dbg_in_kernel()); 133 pthread_mutex_lock(&lock);134 if(!signaled) { // this must be a loop, not if!141 CHECKED( pthread_mutex_lock(&lock) ); 142 while(val < 1) { 135 143 pthread_cond_wait(&cond, &lock); 136 144 } 137 signaled = false; 138 pthread_mutex_unlock(&lock); 139 } 140 141 static inline void post(__bin_sem_t & this) with( this ) { 142 verify(__cfaabi_dbg_in_kernel()); 143 144 pthread_mutex_lock(&lock); 145 bool needs_signal = !signaled; 146 signaled = true; 147 pthread_mutex_unlock(&lock); 148 149 if (needs_signal) 150 
pthread_cond_signal(&cond); 145 val -= 1; 146 CHECKED( pthread_mutex_unlock(&lock) ); 147 } 148 149 static inline bool post(__bin_sem_t & this) with( this ) { 150 bool needs_signal = false; 151 152 CHECKED( pthread_mutex_lock(&lock) ); 153 if(val < 1) { 154 val += 1; 155 pthread_cond_signal(&cond); 156 needs_signal = true; 157 } 158 CHECKED( pthread_mutex_unlock(&lock) ); 159 160 return needs_signal; 161 } 162 163 #undef CHECKED 164 165 struct $thread; 166 extern void park( void ); 167 extern void unpark( struct $thread * this ); 168 static inline struct $thread * active_thread (); 169 170 // Semaphore which only supports a single thread 171 struct single_sem { 172 struct $thread * volatile ptr; 173 }; 174 175 static inline { 176 void ?{}(single_sem & this) { 177 this.ptr = 0p; 178 } 179 180 void ^?{}(single_sem &) {} 181 182 bool wait(single_sem & this) { 183 for() { 184 struct $thread * expected = this.ptr; 185 if(expected == 1p) { 186 if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 187 return false; 188 } 189 } 190 else { 191 /* paranoid */ verify( expected == 0p ); 192 if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 193 park(); 194 return true; 195 } 196 } 197 198 } 199 } 200 201 bool post(single_sem & this) { 202 for() { 203 struct $thread * expected = this.ptr; 204 if(expected == 1p) return false; 205 if(expected == 0p) { 206 if(__atomic_compare_exchange_n(&this.ptr, &expected, 1p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 207 return false; 208 } 209 } 210 else { 211 if(__atomic_compare_exchange_n(&this.ptr, &expected, 0p, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 212 unpark( expected ); 213 return true; 214 } 215 } 216 } 217 } 218 } 219 220 // Synchronozation primitive which only supports a single thread and one post 221 // Similar to a binary semaphore with a 'one shot' semantic 222 // is expected to be discarded after each 
party call their side 223 struct oneshot { 224 // Internal state : 225 // 0p : is initial state (wait will block) 226 // 1p : fulfilled (wait won't block) 227 // any thread : a thread is currently waiting 228 struct $thread * volatile ptr; 229 }; 230 231 static inline { 232 void ?{}(oneshot & this) { 233 this.ptr = 0p; 234 } 235 236 void ^?{}(oneshot &) {} 237 238 // Wait for the post, return immidiately if it already happened. 239 // return true if the thread was parked 240 bool wait(oneshot & this) { 241 for() { 242 struct $thread * expected = this.ptr; 243 if(expected == 1p) return false; 244 /* paranoid */ verify( expected == 0p ); 245 if(__atomic_compare_exchange_n(&this.ptr, &expected, active_thread(), false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 246 park(); 247 /* paranoid */ verify( this.ptr == 1p ); 248 return true; 249 } 250 } 251 } 252 253 // Mark as fulfilled, wake thread if needed 254 // return true if a thread was unparked 255 bool post(oneshot & this) { 256 struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST); 257 if( got == 0p ) return false; 258 unpark( got ); 259 return true; 260 } 261 } 262 263 // base types for future to build upon 264 // It is based on the 'oneshot' type to allow multiple futures 265 // to block on the same instance, permitting users to block a single 266 // thread on "any of" [a given set of] futures. 
267 // does not support multiple threads waiting on the same future 268 struct future_t { 269 // Internal state : 270 // 0p : is initial state (wait will block) 271 // 1p : fulfilled (wait won't block) 272 // 2p : in progress () 273 // 3p : abandoned, server should delete 274 // any oneshot : a context has been setup to wait, a thread could wait on it 275 struct oneshot * volatile ptr; 276 }; 277 278 static inline { 279 void ?{}(future_t & this) { 280 this.ptr = 0p; 281 } 282 283 void ^?{}(future_t &) {} 284 285 void reset(future_t & this) { 286 // needs to be in 0p or 1p 287 __atomic_exchange_n( &this.ptr, 0p, __ATOMIC_SEQ_CST); 288 } 289 290 // check if the future is available 291 bool available( future_t & this ) { 292 return this.ptr == 1p; 293 } 294 295 // Prepare the future to be waited on 296 // intented to be use by wait, wait_any, waitfor, etc. rather than used directly 297 bool setup( future_t & this, oneshot & wait_ctx ) { 298 /* paranoid */ verify( wait_ctx.ptr == 0p ); 299 // The future needs to set the wait context 300 for() { 301 struct oneshot * expected = this.ptr; 302 // Is the future already fulfilled? 303 if(expected == 1p) return false; // Yes, just return false (didn't block) 304 305 // The future is not fulfilled, try to setup the wait context 306 /* paranoid */ verify( expected == 0p ); 307 if(__atomic_compare_exchange_n(&this.ptr, &expected, &wait_ctx, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 308 return true; 309 } 310 } 311 } 312 313 // Stop waiting on a future 314 // When multiple futures are waited for together in "any of" pattern 315 // futures that weren't fulfilled before the thread woke up 316 // should retract the wait ctx 317 // intented to be use by wait, wait_any, waitfor, etc. 
rather than used directly 318 void retract( future_t & this, oneshot & wait_ctx ) { 319 // Remove the wait context 320 struct oneshot * got = __atomic_exchange_n( &this.ptr, 0p, __ATOMIC_SEQ_CST); 321 322 // got == 0p: future was never actually setup, just return 323 if( got == 0p ) return; 324 325 // got == wait_ctx: since fulfil does an atomic_swap, 326 // if we got back the original then no one else saw context 327 // It is safe to delete (which could happen after the return) 328 if( got == &wait_ctx ) return; 329 330 // got == 1p: the future is ready and the context was fully consumed 331 // the server won't use the pointer again 332 // It is safe to delete (which could happen after the return) 333 if( got == 1p ) return; 334 335 // got == 2p: the future is ready but the context hasn't fully been consumed 336 // spin until it is safe to move on 337 if( got == 2p ) { 338 while( this.ptr != 1p ) Pause(); 339 return; 340 } 341 342 // got == any thing else, something wen't wrong here, abort 343 abort("Future in unexpected state"); 344 } 345 346 // Mark the future as abandoned, meaning it will be deleted by the server 347 bool abandon( future_t & this ) { 348 /* paranoid */ verify( this.ptr != 3p ); 349 350 // Mark the future as abandonned 351 struct oneshot * got = __atomic_exchange_n( &this.ptr, 3p, __ATOMIC_SEQ_CST); 352 353 // If the future isn't already fulfilled, let the server delete it 354 if( got == 0p ) return false; 355 356 // got == 2p: the future is ready but the context hasn't fully been consumed 357 // spin until it is safe to move on 358 if( got == 2p ) { 359 while( this.ptr != 1p ) Pause(); 360 got = 1p; 361 } 362 363 // The future is completed delete it now 364 /* paranoid */ verify( this.ptr != 1p ); 365 free( &this ); 366 return true; 367 } 368 369 // from the server side, mark the future as fulfilled 370 // delete it if needed 371 bool fulfil( future_t & this ) { 372 for() { 373 struct oneshot * expected = this.ptr; 374 // was this abandoned? 
375 #if defined(__GNUC__) && __GNUC__ >= 7 376 #pragma GCC diagnostic push 377 #pragma GCC diagnostic ignored "-Wfree-nonheap-object" 378 #endif 379 if( expected == 3p ) { free( &this ); return false; } 380 #if defined(__GNUC__) && __GNUC__ >= 7 381 #pragma GCC diagnostic pop 382 #endif 383 384 /* paranoid */ verify( expected != 1p ); // Future is already fulfilled, should not happen 385 /* paranoid */ verify( expected != 2p ); // Future is bein fulfilled by someone else, this is even less supported then the previous case. 386 387 // If there is a wait context, we need to consume it and mark it as consumed after 388 // If there is no context then we can skip the in progress phase 389 struct oneshot * want = expected == 0p ? 1p : 2p; 390 if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) { 391 if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return false; } 392 bool ret = post( *expected ); 393 __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST); 394 return ret; 395 } 396 } 397 398 } 399 400 // Wait for the future to be fulfilled 401 bool wait( future_t & this ) { 402 oneshot temp; 403 if( !setup(this, temp) ) return false; 404 405 // Wait context is setup, just wait on it 406 bool ret = wait( temp ); 407 408 // Wait for the future to tru 409 while( this.ptr == 2p ) Pause(); 410 // Make sure the state makes sense 411 // Should be fulfilled, could be in progress but it's out of date if so 412 // since if that is the case, the oneshot was fulfilled (unparking this thread) 413 // and the oneshot should not be needed any more 414 __attribute__((unused)) struct oneshot * was = this.ptr; 415 /* paranoid */ verifyf( was == 1p, "Expected this.ptr to be 1p, was %p\n", was ); 416 417 // Mark the future as fulfilled, to be consistent 418 // with potential calls to avail 419 // this.ptr = 1p; 420 return ret; 421 } 151 422 } 152 423 #endif
Note:
See TracChangeset
for help on using the changeset viewer.