Changeset 1c507eb for libcfa/src
- Timestamp:
- Sep 4, 2020, 2:00:53 PM
- Branches:
- ADT, arm-eh, ast-experimental, enum, forall-pointer-decay, jacob/cs343-translation, master, new-ast-unique-expr, pthread-emulation, qualifiedEnum
- Children:
- 7a80113
- Parents:
- 5a1c9ef (diff), 2801829 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself. Use the (diff) links above to see all the changes relative to each parent.
- Location:
- libcfa/src
- Files:
- 3 edited
  - bits/locks.hfa (modified) (4 diffs)
  - heap.cfa (modified) (9 diffs)
  - stdlib.hfa (modified) (5 diffs)
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
- libcfa/src/bits/locks.hfa (r5a1c9ef → r1c507eb)

  }

- // Semaphore which only supports a single thread and one post
- // Semaphore which only supports a single thread
+ // Synchronization primitive which only supports a single thread and one post.
+ // Similar to a binary semaphore with a 'one shot' semantic:
+ // it is expected to be discarded after each party calls their side.
  struct oneshot {
+     // Internal state :
+     //   0p         : initial state (wait will block)
+     //   1p         : fulfilled (wait won't block)
+     //   any thread : a thread is currently waiting
      struct $thread * volatile ptr;
  };
…
      void ^?{}(oneshot & this) {}

+     // Wait for the post; return immediately if it already happened.
+     // Returns true if the thread was parked.
      bool wait(oneshot & this) {
          for() {
…
      }

+     // Mark as fulfilled; wake the waiting thread if needed.
+     // Returns true if a thread was unparked.
      bool post(oneshot & this) {
          struct $thread * got = __atomic_exchange_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
…
      }
  }
+
+ // Base type for futures to build upon.
+ // It is based on the 'oneshot' type to allow multiple futures
+ // to block on the same instance, permitting users to block a single
+ // thread on "any of" [a given set of] futures.
+ // Does not support multiple threads waiting on the same future.
+ struct future_t {
+     // Internal state :
+     //   0p          : initial state (wait will block)
+     //   1p          : fulfilled (wait won't block)
+     //   2p          : in progress (the wait context is being consumed)
+     //   3p          : abandoned, the server should delete it
+     //   any oneshot : a context has been set up to wait, a thread could wait on it
+     struct oneshot * volatile ptr;
+ };
+
+ static inline {
+     void ?{}(future_t & this) {
+         this.ptr = 0p;
+     }
+
+     void ^?{}(future_t & this) {}
+
+     // Check if the future is available.
+     bool available( future_t & this ) {
+         return this.ptr == 1p;
+     }
+
+     // Prepare the future to be waited on.
+     // Intended to be used by wait, wait_any, waitfor, etc. rather than directly.
+     bool setup( future_t & this, oneshot & wait_ctx ) {
+         /* paranoid */ verify( wait_ctx.ptr == 0p );
+         // The future needs to set the wait context
+         for() {
+             struct oneshot * expected = this.ptr;
+             // Is the future already fulfilled?
+             if(expected == 1p) return false; // Yes, just return false (didn't block)
+
+             // The future is not fulfilled, try to set up the wait context
+             /* paranoid */ verify( expected == 0p );
+             if(__atomic_compare_exchange_n(&this.ptr, &expected, &wait_ctx, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                 return true;
+             }
+         }
+     }
+
+     // Stop waiting on a future.
+     // When multiple futures are waited for together in the "any of" pattern,
+     // futures that weren't fulfilled before the thread woke up
+     // should retract their wait context.
+     // Intended to be used by wait, wait_any, waitfor, etc. rather than directly.
+     void retract( future_t & this, oneshot & wait_ctx ) {
+         // Remove the wait context
+         struct oneshot * got = __atomic_exchange_n( &this.ptr, 0p, __ATOMIC_SEQ_CST);
+
+         // got == 0p: future was never actually set up, just return
+         if( got == 0p ) return;
+
+         // got == &wait_ctx: since fulfil does an atomic swap,
+         // if we got back the original then no one else saw the context.
+         // It is safe to delete (which could happen after the return)
+         if( got == &wait_ctx ) return;
+
+         // got == 1p: the future is ready and the context was fully consumed;
+         // the server won't use the pointer again.
+         // It is safe to delete (which could happen after the return)
+         if( got == 1p ) return;
+
+         // got == 2p: the future is ready but the context hasn't been fully consumed;
+         // spin until it is safe to move on
+         if( got == 2p ) {
+             while( this.ptr != 1p ) Pause();
+             return;
+         }
+
+         // got == anything else: something went wrong here, abort
+         abort("Future in unexpected state");
+     }
+
+     // Mark the future as abandoned, meaning it will be deleted by the server.
+     void abandon( future_t & this ) {
+         struct oneshot * got = __atomic_exchange_n( &this.ptr, 3p, __ATOMIC_SEQ_CST);
+
+         // got == 2p: the future is ready but the context hasn't been fully consumed;
+         // spin until it is safe to move on
+         if( got == 2p ) {
+             while( this.ptr != 1p ) Pause();
+         }
+         return;
+     }
+
+     // From the server side, mark the future as fulfilled;
+     // delete it if needed.
+     bool fulfil( future_t & this ) {
+         for() {
+             struct oneshot * expected = this.ptr;
+             // was this abandoned?
+             if( expected == 3p ) { free( &this ); return false; }
+
+             /* paranoid */ verify( expected != 1p ); // Future is already fulfilled, should not happen
+             /* paranoid */ verify( expected != 2p ); // Future is being fulfilled by someone else, which is even less supported than the previous case.
+
+             // If there is a wait context, we need to consume it and mark it as consumed after.
+             // If there is no context then we can skip the in-progress phase.
+             struct oneshot * want = expected == 0p ? 1p : 2p;
+             if(__atomic_compare_exchange_n(&this.ptr, &expected, want, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                 if( expected == 0p ) { /* paranoid */ verify( this.ptr == 1p); return false; }
+                 bool ret = post( *expected );
+                 __atomic_store_n( &this.ptr, 1p, __ATOMIC_SEQ_CST);
+                 return ret;
+             }
+         }
+     }
+
+     // Wait for the future to be fulfilled.
+     bool wait( future_t & this ) {
+         oneshot temp;
+         if( !setup(this, temp) ) return false;
+
+         // Wait context is set up, just wait on it
+         bool ret = wait( temp );
+
+         // Wait for the future to leave the in-progress state
+         while( this.ptr == 2p ) Pause();
+         // Make sure the state makes sense.
+         // Should be fulfilled; could be in progress, but that is out of date,
+         // since in that case the oneshot was fulfilled (unparking this thread)
+         // and the oneshot should not be needed any more.
+         __attribute__((unused)) struct oneshot * was = this.ptr;
+         /* paranoid */ verifyf( was == 1p, "Expected this.ptr to be 1p, was %p\n", was );
+
+         // Mark the future as fulfilled, to be consistent
+         // with potential calls to available
+         // this.ptr = 1p;
+         return ret;
+     }
+ }
  #endif
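For orientation, a minimal usage sketch of the new oneshot/future_t protocol (not part of the changeset): the client/server functions and the produce_result/consume_result helpers are hypothetical names for illustration, and a CFA threading context is assumed so that parking and unparking are legal.

    // Hypothetical sketch: one client thread blocking on a future_t,
    // one server thread fulfilling it.
    future_t f;                       // constructed to 0p : not fulfilled

    void client( future_t & f ) {
        wait( f );                    // sets up an internal oneshot, parks if not yet fulfilled
        consume_result();             // hypothetical: read the data the future guards
    }

    void server( future_t & f ) {
        produce_result();             // hypothetical: publish the guarded data first
        fulfil( f );                  // unparks the client if it was already waiting
    }

The ordering matters: the server makes the result visible before calling fulfil, since fulfil is the synchronization point that releases the waiting client.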
- libcfa/src/heap.cfa (r5a1c9ef → r1c507eb)

  // Created On       : Tue Dec 19 21:58:35 2017
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Mon Aug 24 20:29:24 2020
- // Update Count     : 926
+ // Last Modified On : Thu Sep  3 16:22:54 2020
+ // Update Count     : 943
  //
…
  #include "math.hfa"                 // ceiling
  #include "bitmanip.hfa"             // is_pow2, ceiling2
-
- #define MIN(x, y) (y > x ? x : y)

  static bool traceHeap = false;
…
  headers( "realloc", naddr, header, freeElem, bsize, oalign );
- memcpy( naddr, oaddr, MIN( osize, size ) );   // copy bytes
+ memcpy( naddr, oaddr, min( osize, size ) );   // copy bytes
  free( oaddr );
…
  #endif // __STATISTICS__

- // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
- if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
- if ( unlikely( oaddr == 0p ) ) {
-     #ifdef __STATISTICS__
-     __atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
-     #endif // __STATISTICS__
-     return memalignNoStats( nalign, size );
- } // if
-
  if ( unlikely( nalign < libAlign() ) ) nalign = libAlign(); // reset alignment to minimum
  #ifdef __CFA_DEBUG__
…
  #endif // __CFA_DEBUG__

- HeapManager.Storage.Header * header;
- HeapManager.FreeHeader * freeElem;
- size_t bsize, oalign;
- headers( "resize", oaddr, header, freeElem, bsize, oalign );
- size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
-
- if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
-     if ( oalign > libAlign() ) { // fake header ?
+ // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
+ if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
+ if ( unlikely( oaddr == 0p ) ) {
+     #ifdef __STATISTICS__
+     __atomic_add_fetch( &resize_storage, size, __ATOMIC_SEQ_CST );
+     #endif // __STATISTICS__
+     return memalignNoStats( nalign, size );
+ } // if
+
+ // Attempt to reuse existing storage.
+ HeapManager.Storage.Header * header = headerAddr( oaddr );
+ if ( unlikely( ( header->kind.fake.alignment & 1 == 1 &&  // old fake header ?
+                  (uintptr_t)oaddr % nalign == 0 &&        // lucky match ?
+                  header->kind.fake.alignment <= nalign && // ok to leave LSB at 1
+                  nalign <= 128 )                          // not too much alignment storage wasted ?
+             || ( header->kind.fake.alignment & 1 != 1 &&  // old real header (aligned on libAlign) ?
+                  nalign == libAlign() ) ) ) {             // new alignment also on libAlign
+
+     HeapManager.FreeHeader * freeElem;
+     size_t bsize, oalign;
+     headers( "resize", oaddr, header, freeElem, bsize, oalign );
+     size_t odsize = dataStorage( bsize, oaddr, header ); // data storage available in bucket
+
+     if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted data storage
      headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
- } // if
- if ( size <= odsize && odsize <= size * 2 ) { // allow 50% wasted storage for smaller size
-     header->kind.real.blockSize &= -2; // turn off 0 fill
-     header->kind.real.size = size; // reset allocation size
+
+         header->kind.real.blockSize &= -2; // turn off 0 fill
+         header->kind.real.size = size;     // reset allocation size
      return oaddr;
  } // if
…
  #endif // __CFA_DEBUG__

+ // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
+ if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
+ if ( unlikely( oaddr == 0p ) ) {
+     #ifdef __STATISTICS__
+     __atomic_add_fetch( &realloc_calls, 1, __ATOMIC_SEQ_CST );
+     __atomic_add_fetch( &realloc_storage, size, __ATOMIC_SEQ_CST );
+     #endif // __STATISTICS__
+     return memalignNoStats( nalign, size );
+ } // if
+
  HeapManager.Storage.Header * header;
  HeapManager.FreeHeader * freeElem;
…
  headers( "realloc", oaddr, header, freeElem, bsize, oalign );

- if ( oalign <= nalign && (uintptr_t)oaddr % nalign == 0 ) { // <= alignment and new alignment happens to match
-     if ( oalign > libAlign() ) { // fake header ?
-         headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
-     } // if
+ // Attempt to reuse existing storage.
+ if ( unlikely( ( header->kind.fake.alignment & 1 == 1 &&  // old fake header ?
+                  (uintptr_t)oaddr % nalign == 0 &&        // lucky match ?
+                  header->kind.fake.alignment <= nalign && // ok to leave LSB at 1
+                  nalign <= 128 )                          // not too much alignment storage wasted ?
+             || ( header->kind.fake.alignment & 1 != 1 &&  // old real header (aligned on libAlign) ?
+                  nalign == libAlign() ) ) ) {             // new alignment also on libAlign
+
+     headerAddr( oaddr )->kind.fake.alignment = nalign | 1; // update alignment (could be the same)
      return realloc( oaddr, size );
+
  } // if
…
  #endif // __STATISTICS__

- // If size is equal to 0, either NULL or a pointer suitable to be passed to free() is returned.
- if ( unlikely( size == 0 ) ) { free( oaddr ); return 0p; } // special cases
- if ( unlikely( oaddr == 0p ) ) return memalignNoStats( nalign, size );
-
  size_t osize = header->kind.real.size; // old allocation size
  bool ozfill = (header->kind.real.blockSize & 2) != 0; // old allocation zero filled
…
  headers( "realloc", naddr, header, freeElem, bsize, oalign );
- memcpy( naddr, oaddr, MIN( osize, size ) );   // copy bytes
+ memcpy( naddr, oaddr, min( osize, size ) );   // copy bytes
  free( oaddr );
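The storage-reuse test added here relies on the low-bit tag in the header: a fake header created for aligned storage stores its alignment with the LSB set (nalign | 1), while a real header's field is always even. A minimal sketch of that encoding, with hypothetical helper names not taken from the changeset:

    // Hypothetical helpers illustrating the LSB tag the reuse test checks.
    static inline size_t tag_fake_alignment( size_t nalign ) {
        return nalign | 1;                  // set LSB : marks a fake (aligned) header
    }
    static inline bool is_fake_header( size_t alignment_field ) {
        return (alignment_field & 1) == 1;  // LSB set => fake header; even => real header
    }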
- libcfa/src/stdlib.hfa (r5a1c9ef → r1c507eb)

  // Created On       : Thu Jan 28 17:12:35 2016
  // Last Modified By : Peter A. Buhr
- // Last Modified On : Fri Aug 14 23:38:50 2020
- // Update Count     : 504
+ // Last Modified On : Tue Sep  1 20:32:34 2020
+ // Update Count     : 505
  //
…

  // Macro because of returns
- #define $VAR_ALLOC( allocation, alignment ) \
-     if ( _Alignof(T) <= libAlign() ) return (T *)(void *)allocation( (size_t)sizeof(T) ); /* C allocation */ \
-     else return (T *)alignment( _Alignof(T), sizeof(T) )
-
  #define $ARRAY_ALLOC( allocation, alignment, dim ) \
      if ( _Alignof(T) <= libAlign() ) return (T *)(void *)allocation( dim, (size_t)sizeof(T) ); /* C allocation */ \
      else return (T *)alignment( _Alignof(T), dim, sizeof(T) )

- #define $RE_SPECIALS( ptr, size, allocation, alignment ) \
-     if ( unlikely( size == 0 ) || unlikely( ptr == 0p ) ) { \
-         if ( unlikely( size == 0 ) ) free( ptr ); \
-         $VAR_ALLOC( malloc, memalign ); \
-     } /* if */
-
  static inline forall( dtype T | sized(T) ) {
      // Cforall safe equivalents, i.e., implicit size specification

      T * malloc( void ) {
-         $VAR_ALLOC( malloc, memalign );
+         if ( _Alignof(T) <= libAlign() ) return (T *)(void *)malloc( (size_t)sizeof(T) ); // C allocation
+         else return (T *)memalign( _Alignof(T), sizeof(T) );
      } // malloc
…
      T * resize( T * ptr, size_t size ) { // CFA resize, eliminate return-type cast
-         $RE_SPECIALS( ptr, size, malloc, memalign );
          if ( _Alignof(T) <= libAlign() ) return (T *)(void *)resize( (void *)ptr, size ); // CFA resize
          else return (T *)(void *)resize( (void *)ptr, _Alignof(T), size ); // CFA resize
…
      T * realloc( T * ptr, size_t size ) { // CFA realloc, eliminate return-type cast
-         $RE_SPECIALS( ptr, size, malloc, memalign );
          if ( _Alignof(T) <= libAlign() ) return (T *)(void *)realloc( (void *)ptr, size ); // C realloc
          else return (T *)(void *)realloc( (void *)ptr, _Alignof(T), size ); // CFA realloc
…
      size_t copy_end = 0;

- if (Resize) {
-     ptr = (T*) (void *) resize( (int*)Resize, Align, Dim * size );
- } else if (Realloc) {
+ if ( Resize ) {
+     ptr = (T*) (void *) resize( (void *)Resize, Align, Dim * size );
+ } else if ( Realloc ) {
      if (Fill.tag != '0') copy_end = min(malloc_size( Realloc ), Dim * size);
-     ptr = (T*) (void *) realloc( (int*)Realloc, Align, Dim * size );
+     ptr = (T*) (void *) realloc( (void *)Realloc, Align, Dim * size );
  } else {
      ptr = (T*) (void *) memalign( Align, Dim * size );
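The typed wrappers above use CFA's return-type overloading, so the element size and alignment come from the declared type of the target pointer. A small usage sketch (the struct S is made up for illustration):

    struct S { int x; double y; };

    S * p = malloc();                     // sizeof(S) and _Alignof(S) inferred from p's type
    p = realloc( p, 10 * sizeof(S) );     // typed realloc, no return-type cast needed
    free( p );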