Changeset 63be3387 for libcfa/src/heap.cfa
- Timestamp: Nov 14, 2022, 11:52:44 AM (3 years ago)
- Branches: ADT, ast-experimental, master
- Children: 7d9598d8
- Parents: b77f0e1 (diff), 19a8c40 (diff)
Note: this is a merge changeset; the changes displayed below correspond to the merge itself.
Use the (diff) links above to see all the changes relative to each parent.

Files: 1 edited
- libcfa/src/heap.cfa (modified) (22 diffs)
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
libcfa/src/heap.cfa
--- libcfa/src/heap.cfa (rb77f0e1)
+++ libcfa/src/heap.cfa (r63be3387)
…
 // Created On       : Tue Dec 19 21:58:35 2017
 // Last Modified By : Peter A. Buhr
-// Last Modified On : Thu Oct 13 22:21:52 2022
-// Update Count     : 1557
+// Last Modified On : Sun Oct 30 20:56:20 2022
+// Update Count     : 1584
 //
…
 #define FASTLOOKUP // use O(1) table lookup from allocation size to bucket size
-#define RETURNSPIN // toggle spinlock / lockfree stack
 #define OWNERSHIP // return freed memory to owner thread
+#define RETURNSPIN // toggle spinlock / lockfree queue
+#if ! defined( OWNERSHIP ) && defined( RETURNSPIN )
+#warning "RETURNSPIN is ignored without OWNERSHIP; suggest commenting out RETURNSPIN"
+#endif // ! OWNERSHIP && RETURNSPIN
 
 #define CACHE_ALIGN 64
…
-//######################### Spin Lock #########################
+//######################### Helpers #########################
+
+
+// generic Bsearchl does not inline, so substitute with hand-coded binary-search.
+inline __attribute__((always_inline))
+static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
+    size_t l = 0, m, h = dim;
+    while ( l < h ) {
+        m = (l + h) / 2;
+        if ( (unsigned int &)(vals[m]) < key ) {    // cast away const
+            l = m + 1;
+        } else {
+            h = m;
+        } // if
+    } // while
+    return l;
+} // Bsearchl
 
…
-#define SPINLOCK 0
-#define LOCKFREE 1
-#define BUCKETLOCK SPINLOCK
-#if BUCKETLOCK == SPINLOCK
-#elif BUCKETLOCK == LOCKFREE
-#include <stackLockFree.hfa>
-#else
-    #error undefined lock type for bucket lock
-#endif // LOCKFREE
-
 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage.
 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined.
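Annotation: the relocated Bsearchl is a lower-bound binary search; it returns the index of the first table entry greater than or equal to the key, which is exactly what mapping a request size to the smallest adequate bucket needs. Below is a minimal standalone C sketch of the same technique; the bucket values and the name bsearchl are illustrative assumptions, not heap.cfa's real table (and plain C needs no cast-away-const trick).

    #include <assert.h>
    #include <stdio.h>

    /* Lower-bound binary search, same shape as Bsearchl: returns the index
       of the first element >= key, or dim if every element is smaller. */
    static size_t bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
        size_t l = 0, h = dim;
        while ( l < h ) {
            size_t m = (l + h) / 2;
            if ( vals[m] < key ) l = m + 1;     /* bucket too small, search right half */
            else h = m;                         /* candidate, search left half */
        }
        return l;
    }

    int main( void ) {
        /* Hypothetical bucket sizes for illustration only. */
        static const unsigned int buckets[] = { 16, 32, 48, 64, 96, 128 };
        size_t dim = sizeof(buckets) / sizeof(buckets[0]);
        assert( bsearchl( 33, buckets, dim ) == 2 );    /* 33-byte request -> 48-byte bucket */
        assert( bsearchl( 64, buckets, dim ) == 3 );    /* exact fit -> 64-byte bucket */
        printf( "request 33 -> bucket %u\n", buckets[ bsearchl( 33, buckets, dim ) ] );
        return 0;
    }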
…
                void * home;                    // allocated block points back to home locations (must overlay alignment)
                size_t blockSize;               // size for munmap (must overlay alignment)
-               #if BUCKETLOCK == SPINLOCK
                Storage * next;                 // freed block points to next freed block of same size
-               #endif // SPINLOCK
            };
            size_t size;                        // allocation size in bytes
        };
-       #if BUCKETLOCK == LOCKFREE
-       Link(Storage) next;                     // freed block points next freed block of same size (double-wide)
-       #endif // LOCKFREE
    };
 } real; // RealHeader
…
 struct __attribute__(( aligned (8) )) FreeHeader {
     size_t blockSize __attribute__(( aligned(8) )); // size of allocations on this list
-    #if BUCKETLOCK == SPINLOCK
     #ifdef OWNERSHIP
     #ifdef RETURNSPIN
…
     Storage * returnList;                      // other thread return list
     #endif // OWNERSHIP
+
     Storage * freeList;                        // thread free list
-    #else
-    StackLF(Storage) freeList;
-    #endif // BUCKETLOCK
     Heap * homeManager;                        // heap owner (free storage to bucket, from bucket to heap)
 }; // FreeHeader
…
     #endif // __STATISTICS__
 }; // Heap
-
-#if BUCKETLOCK == LOCKFREE
-inline __attribute__((always_inline))
-static {
-    Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; }
-    void ?{}( Heap.FreeHeader & ) {}
-    void ^?{}( Heap.FreeHeader & ) {}
-} // distribution
-#endif // LOCKFREE
 
…
-// generic Bsearchl does not inline, so substitute with hand-coded binary-search.
-inline __attribute__((always_inline))
-static size_t Bsearchl( unsigned int key, const unsigned int vals[], size_t dim ) {
-    size_t l = 0, m, h = dim;
-    while ( l < h ) {
-        m = (l + h) / 2;
-        if ( (unsigned int &)(vals[m]) < key ) {    // cast away const
-            l = m + 1;
-        } else {
-            h = m;
-        } // if
-    } // while
-    return l;
-} // Bsearchl
-
-
 void heapMasterCtor() with( heapMaster ) {
     // Singleton pattern to initialize heap master
…
     __map_prot = PROT_READ | PROT_WRITE | PROT_EXEC;
 
-    ?{}( extLock );
-    ?{}( mgrLock );
+    extLock = 0;
+    mgrLock = 0;
 
     char * end = (char *)sbrk( 0 );
…
         #ifdef OWNERSHIP
         #ifdef RETURNSPIN
-        ?{}( freeLists[j].returnLock );
+        freeLists[j].returnLock = 0;
+        freeLists[j].returnList = 0p;
         #endif // RETURNSPIN
-        freeLists[j].returnList = 0p;
         #endif // OWNERSHIP
+
         freeLists[j].freeList = 0p;
         freeLists[j].homeManager = heap;
         freeLists[j].blockSize = bucketSizes[j];
     } // for
 
     heapBuffer = 0p;
     heapReserve = 0;
…
     if ( unlikely( ! heapMasterBootFlag ) ) heapMasterCtor();
 
-    lock( heapMaster.mgrLock );                // protect heapMaster counters
+    lock( heapMaster.mgrLock );                 // protect heapMaster counters
 
     // get storage for heap manager
…
     // find the closest bucket size less than or equal to the mmapStart size
     maxBucketsUsed = Bsearchl( mmapStart, bucketSizes, NoBucketSizes ); // binary search
+
     verify( maxBucketsUsed < NoBucketSizes );   // subscript failure ?
     verify( mmapStart <= bucketSizes[maxBucketsUsed] ); // search failure ?
…
 
     size_t increase = ceiling2( size > heapExpand ? size : heapExpand, libAlign() );
-    // Do not call abort or strerror( errno ) as they may call malloc.
     if ( unlikely( sbrk( increase ) == (void *)-1 ) ) { // failed, no memory ?
         unlock( extLock );
-        abort( NO_MEMORY_MSG, size );           // no memory
+        abort( NO_MEMORY_MSG, size );           // give up
     } // if
 
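Annotation: the constructor hunks above replace CFA lock constructors ( ?{}(...) ) with plain zero assignments and move the returnList initialization under RETURNSPIN. A rough C mirror of the resulting per-bucket header and its initialization follows; the field names track the diff, but the int-as-spinlock type and the helper name freeHeaderInit are assumptions for illustration.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct Storage Storage;
    struct Storage { Storage * next; };  /* freed block points to next freed block of same size */

    /* Rough mirror of Heap.FreeHeader after this merge: one private free
       list for the owner thread, plus a spinlock-guarded list where other
       threads return blocks (OWNERSHIP + RETURNSPIN configuration). */
    typedef struct {
        size_t blockSize;               /* size of allocations on this list */
        volatile int returnLock;        /* 0 == unlocked, cf. "returnLock = 0" */
        Storage * returnList;           /* other-thread return list */
        Storage * freeList;             /* owner-thread free list */
        void * homeManager;             /* heap owner */
    } FreeHeader;

    /* Plain-assignment initialization mirroring the rewritten ctor loop. */
    static void freeHeaderInit( FreeHeader * fh, size_t bucketSize, void * heap ) {
        fh->returnLock = 0;             /* replaces the old ?{}( returnLock ) */
        fh->returnList = NULL;
        fh->freeList = NULL;
        fh->homeManager = heap;
        fh->blockSize = bucketSize;
    }

    int main( void ) {
        FreeHeader fh;
        freeHeaderInit( &fh, 64, NULL );
        printf( "bucket %zu ready\n", fh.blockSize );
        return 0;
    }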
…
         #endif // __STATISTICS__
 
-        // Spin until the lock is acquired for this particular size of block.
-
-        #if BUCKETLOCK == SPINLOCK
         block = freeHead->freeList;             // remove node from stack
-        #else
-        block = pop( freeHead->freeList );
-        #endif // BUCKETLOCK
         if ( unlikely( block == 0p ) ) {        // no free block ?
+            // Freelist for this size is empty, so check return list (OWNERSHIP), carve it out of the heap, if there
+            // is enough left, or get some more heap storage and carve it off.
             #ifdef OWNERSHIP
-            // Freelist for that size is empty, so carve it out of the heap, if there is enough left, or get some more
-            // and then carve it off.
-            #ifdef RETURNSPIN
-            #if BUCKETLOCK == SPINLOCK
-            lock( freeHead->returnLock );
-            block = freeHead->returnList;
-            freeHead->returnList = 0p;
-            unlock( freeHead->returnLock );
-            #else
-            block = __atomic_exchange_n( &freeHead->returnList, nullptr, __ATOMIC_SEQ_CST );
-            #endif // RETURNSPIN
-
-            if ( likely( block == 0p ) ) {      // return list also empty?
+            if ( unlikely( freeHead->returnList ) ) { // race, get next time if lose race
+                #ifdef RETURNSPIN
+                lock( freeHead->returnLock );
+                block = freeHead->returnList;
+                freeHead->returnList = 0p;
+                unlock( freeHead->returnLock );
+                #else
+                block = __atomic_exchange_n( &freeHead->returnList, 0p, __ATOMIC_SEQ_CST );
+                #endif // RETURNSPIN
+
+                verify( block );
+                #ifdef __STATISTICS__
+                stats.return_pulls += 1;
+                #endif // __STATISTICS__
+
+                // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
+
+                freeHead->freeList = block->header.kind.real.next; // merge returnList into freeHead
+            } else {
             #endif // OWNERSHIP
                 // Do not leave kernel thread as manager_extend accesses heapManager.
…
 
                 #ifdef __CFA_DEBUG__
-                // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes.
+                // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes.
                 memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
                 #endif // __CFA_DEBUG__
-            #endif // BUCKETLOCK
             #ifdef OWNERSHIP
-            } else {                            // merge returnList into freeHead
-                #ifdef __STATISTICS__
-                stats.return_pulls += 1;
-                #endif // __STATISTICS__
-
-                // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
-
-                freeHead->freeList = block->header.kind.real.next;
             } // if
             #endif // OWNERSHIP
…
     if ( unlikely( size > ULONG_MAX - __page_size ) ) return 0p;
     tsize = ceiling2( tsize, __page_size );     // must be multiple of page size
+
     #ifdef __STATISTICS__
     stats.counters[STAT_NAME].alloc += tsize;
…
         if ( errno == ENOMEM ) abort( NO_MEMORY_MSG, tsize ); // no memory
         // Do not call strerror( errno ) as it may call malloc.
-        abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.", size, heapMaster.mmapStart, errno );
+        abort( "**** Error **** attempt to allocate large object (> %zu) of size %zu bytes and mmap failed with errno %d.",
+               size, heapMaster.mmapStart, errno );
     } // if
     block->header.kind.real.blockSize = MarkMmappedBit( tsize ); // storage size for munmap
 
     #ifdef __CFA_DEBUG__
-    // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first 1024 bytes. The rest of
-    // the storage set to 0 by mmap.
+    // Scrub new memory so subsequent uninitialized usages might fail. Only scrub the first SCRUB_SIZE bytes. The
+    // rest of the storage set to 0 by mmap.
     memset( block->data, SCRUB, min( SCRUB_SIZE, tsize - sizeof(Heap.Storage) ) );
     #endif // __CFA_DEBUG__
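Annotation: the rewritten allocation path first peeks at returnList without synchronization; only the owner ever empties that list, so if the peek loses a race with a concurrent push it simply picks the block up on a later call. The owner then takes the whole return list in one step, either under the spinlock or with a single atomic exchange. A minimal C11 sketch of the spinlock variant follows; the names pullReturnList, returnLock, and the atomic_flag stand-in are assumptions, not CFA's actual lock type.

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Node Node;
    struct Node { Node * next; };

    static atomic_flag returnLock = ATOMIC_FLAG_INIT;   /* RETURNSPIN spinlock stand-in */
    static Node * volatile returnList = NULL;
    static Node * freeList = NULL;

    /* Owner-side pull, shaped like the new doMalloc path: peek without the
       lock (only the owner empties the list, so a non-NULL peek stays
       non-NULL), then take the whole return list and merge it. */
    static Node * pullReturnList( void ) {
        Node * block = NULL;
        if ( returnList != NULL ) {             /* racy peek: lose the race, get it next time */
            while ( atomic_flag_test_and_set_explicit( &returnLock, memory_order_acquire ) ) {}
            block = returnList;
            returnList = NULL;
            atomic_flag_clear_explicit( &returnLock, memory_order_release );
            assert( block != NULL );            /* cf. verify( block ) */
            freeList = block->next;             /* merge remainder into the private free list */
        }
        return block;
    }

    int main( void ) {
        static Node a = { NULL }, b = { &a };
        returnList = &b;
        Node * got = pullReturnList();
        printf( "pulled %p, free list now %p\n", (void *)got, (void *)freeList );
        return 0;
    }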
…
     #endif // __CFA_DEBUG__
 
+    #ifdef OWNERSHIP
     if ( likely( heapManager == freeHead->homeManager ) ) { // belongs to this thread
         header->kind.real.next = freeHead->freeList; // push on stack
…
         verify( heapManager );
 
-        #ifdef OWNERSHIP
         #ifdef RETURNSPIN
         lock( freeHead->returnLock );
…
         header->kind.real.next = freeHead->returnList; // link new node to top node
         // CAS resets header->kind.real.next = freeHead->returnList on failure
-        while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, header,
+        while ( ! __atomic_compare_exchange_n( &freeHead->returnList, &header->kind.real.next, (Heap.Storage *)header,
                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) );
         #endif // RETURNSPIN
-
-        #else                                   // no OWNERSHIP
-
-        freeHead = &heap->freeLists[ClearStickyBits( header->kind.real.home ) - &freeHead->homeManager->freeLists[0]];
-        header->kind.real.next = freeHead->freeList; // push on stack
-        freeHead->freeList = (Heap.Storage *)header;
-        #endif // ! OWNERSHIP
-
-        #ifdef __U_STATISTICS__
-        stats.return_pushes += 1;
-        stats.return_storage_request += rsize;
-        stats.return_storage_alloc += size;
-        #endif // __U_STATISTICS__
-
-        // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
-    } // if
+    } // if
+
+    #else                                       // no OWNERSHIP
+
+    // kind.real.home is address in owner thread's freeLists, so compute the equivalent position in this thread's freeList.
+    freeHead = &freeLists[ClearStickyBits( (Heap.FreeHeader *)(header->kind.real.home) ) - &freeHead->homeManager->freeLists[0]];
+    header->kind.real.next = freeHead->freeList; // push on stack
+    freeHead->freeList = (Heap.Storage *)header;
+    #endif // ! OWNERSHIP
+
+    #ifdef __U_STATISTICS__
+    stats.return_pushes += 1;
+    stats.return_storage_request += rsize;
+    stats.return_storage_alloc += size;
+    #endif // __U_STATISTICS__
+
+    // OK TO BE PREEMPTED HERE AS heapManager IS NO LONGER ACCESSED.
 } // if
 
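Annotation: the lock-free branch of the free path pushes the block onto the owner's returnList with the same CAS loop shape as a Treiber-stack push; the merge only adds the (Heap.Storage *) cast on the desired value. Because foreign threads only push and the owner drains the entire list at once (never popping single nodes), ABA is harmless here, which is why the old double-wide Link representation could be removed. A small C sketch using the same GCC builtin; the names returnPush and Node are illustrative assumptions.

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Node Node;
    struct Node { Node * next; };

    static Node * returnList = NULL;

    /* Foreign-thread push, shaped like the RETURNSPIN-disabled branch: on
       CAS failure __atomic_compare_exchange_n rewrites node->next with the
       current head, so the empty loop body simply retries with the
       refreshed link. */
    static void returnPush( Node * node ) {
        node->next = returnList;                /* link new node to top node */
        while ( ! __atomic_compare_exchange_n( &returnList, &node->next, node,
                                               false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST ) ) {}
    }

    int main( void ) {
        static Node a, b;
        returnPush( &a );
        returnPush( &b );
        for ( Node * p = returnList; p != NULL; p = p->next )
            printf( "node %p\n", (void *)p );   /* b then a: LIFO order */
        return 0;
    }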
…
         #endif // __STATISTICS__
 
-        #if BUCKETLOCK == SPINLOCK
         for ( Heap.Storage * p = freeLists[i].freeList; p != 0p; p = p->header.kind.real.next ) {
-        #else
-        for(;;) {
-//          for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; p = (p)`next->top ) {
-//          for ( Heap.Storage * p = top( freeLists[i].freeList ); p != 0p; /* p = getNext( p )->top */) {
-//          Heap.Storage * temp = p->header.kind.real.next.top; // FIX ME: direct assignent fails, initialization works`
-//          typeof(p) temp = (( p )`next)->top; // FIX ME: direct assignent fails, initialization works`
-//          p = temp;
-        #endif // BUCKETLOCK
             total += size;
             #ifdef __STATISTICS__
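Annotation: with BUCKETLOCK and the lock-free stack gone, every bucket's free list is a plain intrusive singly-linked list, so the statistics walk becomes an ordinary pointer chase. A tiny C sketch of that walk under hypothetical names:

    #include <stddef.h>
    #include <stdio.h>

    typedef struct Node Node;
    struct Node { Node * next; };

    /* Total the storage held on one bucket's free list by chasing the
       intrusive next pointers; every block on the list has blockSize bytes. */
    static size_t bucketFreeTotal( const Node * freeList, size_t blockSize ) {
        size_t total = 0;
        for ( const Node * p = freeList; p != NULL; p = p->next )
            total += blockSize;
        return total;
    }

    int main( void ) {
        static Node c = { NULL }, b = { &c }, a = { &b };
        printf( "%zu bytes free\n", bucketFreeTotal( &a, 64 ) );    /* 3 * 64 = 192 */
        return 0;
    }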