Changeset 433905a
- Timestamp:
- Apr 29, 2022, 9:39:09 PM (3 years ago)
- Branches:
- ADT, ast-experimental, master, pthread-emulation, qualifiedEnum
- Children:
- 4b4f95f
- Parents:
- b2516e6
Files:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
libcfa/src/heap.cfa
rb2516e6 r433905a 10 10 // Created On : Tue Dec 19 21:58:35 2017 11 11 // Last Modified By : Peter A. Buhr 12 // Last Modified On : Mon Apr 25 18:51:36202213 // Update Count : 11 4712 // Last Modified On : Fri Apr 29 19:05:03 2022 13 // Update Count : 1167 14 14 // 15 15 … … 87 87 88 88 89 #ifdef __CFA_DEBUG__ 90 static size_t allocUnfreed; // running total of allocations minus frees 91 92 static void prtUnfreed() { 93 if ( allocUnfreed != 0 ) { 94 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 95 char helpText[512]; 96 int len = snprintf( helpText, sizeof(helpText), "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 97 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 98 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 99 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 100 } // if 101 } // prtUnfreed 102 103 extern int cfa_main_returned; // from interpose.cfa 104 extern "C" { 105 void heapAppStart() { // called by __cfaabi_appready_startup 106 allocUnfreed = 0; 107 } // heapAppStart 108 109 void heapAppStop() { // called by __cfaabi_appready_startdown 110 fclose( stdin ); fclose( stdout ); 111 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 112 } // heapAppStop 113 } // extern "C" 114 #endif // __CFA_DEBUG__ 115 116 117 // statically allocated variables => zero filled. 
118 static size_t heapExpand; // sbrk advance 119 static size_t mmapStart; // cross over point for mmap 120 static unsigned int maxBucketsUsed; // maximum number of buckets in use 121 // extern visibility, used by runtime kernel 122 size_t __page_size; // architecture pagesize 123 int __map_prot; // common mmap/mprotect protection 124 125 126 #define SPINLOCK 0 127 #define LOCKFREE 1 128 #define BUCKETLOCK SPINLOCK 129 #if BUCKETLOCK == SPINLOCK 130 #elif BUCKETLOCK == LOCKFREE 131 #include <stackLockFree.hfa> 132 #else 133 #error undefined lock type for bucket lock 134 #endif // LOCKFREE 135 136 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 137 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 138 enum { NoBucketSizes = 91 }; // number of buckets sizes 139 140 struct Heap { 141 struct Storage { 142 struct Header { // header 143 union Kind { 144 struct RealHeader { 145 union { 146 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 147 union { 148 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 149 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 150 void * home; // allocated block points back to home locations (must overlay alignment) 151 size_t blockSize; // size for munmap (must overlay alignment) 152 #if BUCKETLOCK == SPINLOCK 153 Storage * next; // freed block points to next freed block of same size 154 #endif // SPINLOCK 155 }; 156 size_t size; // allocation size in bytes 157 }; 158 #if BUCKETLOCK == LOCKFREE 159 Link(Storage) next; // freed block points next freed block of same size (double-wide) 160 #endif // LOCKFREE 161 }; 162 } real; // RealHeader 163 164 struct FakeHeader { 165 uintptr_t alignment; // 1st low-order bit => fake header & alignment 166 uintptr_t offset; 167 } fake; // FakeHeader 168 } kind; // Kind 169 } header; // 
Header 170 171 char pad[libAlign() - sizeof( Header )]; 172 char data[0]; // storage 173 }; // Storage 174 175 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 176 177 struct FreeHeader { 178 #if BUCKETLOCK == SPINLOCK 179 __spinlock_t lock; // must be first field for alignment 180 Storage * freeList; 181 #else 182 StackLF(Storage) freeList; 183 #endif // BUCKETLOCK 184 size_t blockSize; // size of allocations on this list 185 }; // FreeHeader 186 187 // must be first fields for alignment 188 __spinlock_t extlock; // protects allocation-buffer extension 189 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 190 191 void * heapBegin; // start of heap 192 void * heapEnd; // logical end of heap 193 size_t heapRemaining; // amount of storage not allocated in the current chunk 194 }; // Heap 195 196 #if BUCKETLOCK == LOCKFREE 197 static inline { 198 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 199 void ?{}( Heap.FreeHeader & ) {} 200 void ^?{}( Heap.FreeHeader & ) {} 201 } // distribution 202 #endif // LOCKFREE 203 204 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 205 206 207 #ifdef FASTLOOKUP 208 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 209 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 210 #endif // FASTLOOKUP 211 212 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 213 #ifdef __CFA_DEBUG__ 214 static bool heapBoot = 0; // detect recursion during boot 215 #endif // __CFA_DEBUG__ 216 217 218 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. 219 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 
220 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 221 static const unsigned int bucketSizes[] @= { // different bucket sizes 222 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 223 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 224 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 225 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 226 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 227 1_536, 2_048 + sizeof(Heap.Storage), // 2 228 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 229 6_144, 8_192 + sizeof(Heap.Storage), // 2 230 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 231 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 232 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 233 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 234 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 235 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 236 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 237 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 238 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 239 }; 240 241 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 242 243 // The constructor for heapManager is called explicitly in memory_startup. 
244 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 245 246 247 //####################### Memory Allocation Routines Helpers #################### 89 //####################### Heap Statistics #################### 248 90 249 91 … … 307 149 return lhs; 308 150 } // ?+=? 309 151 #endif // __STATISTICS__ 152 153 154 #define SPINLOCK 0 155 #define LOCKFREE 1 156 #define BUCKETLOCK SPINLOCK 157 #if BUCKETLOCK == SPINLOCK 158 #elif BUCKETLOCK == LOCKFREE 159 #include <stackLockFree.hfa> 160 #else 161 #error undefined lock type for bucket lock 162 #endif // LOCKFREE 163 164 // Recursive definitions: HeapManager needs size of bucket array and bucket area needs sizeof HeapManager storage. 165 // Break recursion by hardcoding number of buckets and statically checking number is correct after bucket array defined. 166 enum { NoBucketSizes = 91 }; // number of buckets sizes 167 168 struct Heap { 169 struct Storage { 170 struct Header { // header 171 union Kind { 172 struct RealHeader { 173 union { 174 struct { // 4-byte word => 8-byte header, 8-byte word => 16-byte header 175 union { 176 // 2nd low-order bit => zero filled, 3rd low-order bit => mmapped 177 // FreeHeader * home; // allocated block points back to home locations (must overlay alignment) 178 void * home; // allocated block points back to home locations (must overlay alignment) 179 size_t blockSize; // size for munmap (must overlay alignment) 180 #if BUCKETLOCK == SPINLOCK 181 Storage * next; // freed block points to next freed block of same size 182 #endif // SPINLOCK 183 }; 184 size_t size; // allocation size in bytes 185 }; 186 #if BUCKETLOCK == LOCKFREE 187 Link(Storage) next; // freed block points next freed block of same size (double-wide) 188 #endif // LOCKFREE 189 }; 190 } real; // RealHeader 191 192 struct FakeHeader { 193 uintptr_t alignment; // 1st low-order bit => fake header & alignment 194 uintptr_t offset; 195 } fake; // FakeHeader 196 } 
kind; // Kind 197 } header; // Header 198 199 char pad[libAlign() - sizeof( Header )]; 200 char data[0]; // storage 201 }; // Storage 202 203 static_assert( libAlign() >= sizeof( Storage ), "minimum alignment < sizeof( Storage )" ); 204 205 struct FreeHeader { 206 size_t blockSize __attribute__(( aligned (8) )); // size of allocations on this list 207 #if BUCKETLOCK == SPINLOCK 208 __spinlock_t lock; 209 Storage * freeList; 210 #else 211 StackLF(Storage) freeList; 212 #endif // BUCKETLOCK 213 } __attribute__(( aligned (8) )); // FreeHeader 214 215 FreeHeader freeLists[NoBucketSizes]; // buckets for different allocation sizes 216 217 __spinlock_t extlock; // protects allocation-buffer extension 218 void * heapBegin; // start of heap 219 void * heapEnd; // logical end of heap 220 size_t heapRemaining; // amount of storage not allocated in the current chunk 221 }; // Heap 222 223 #if BUCKETLOCK == LOCKFREE 224 static inline { 225 Link(Heap.Storage) * ?`next( Heap.Storage * this ) { return &this->header.kind.real.next; } 226 void ?{}( Heap.FreeHeader & ) {} 227 void ^?{}( Heap.FreeHeader & ) {} 228 } // distribution 229 #endif // LOCKFREE 230 231 static inline size_t getKey( const Heap.FreeHeader & freeheader ) { return freeheader.blockSize; } 232 233 234 #ifdef FASTLOOKUP 235 enum { LookupSizes = 65_536 + sizeof(Heap.Storage) }; // number of fast lookup sizes 236 static unsigned char lookup[LookupSizes]; // O(1) lookup for small sizes 237 #endif // FASTLOOKUP 238 239 static const off_t mmapFd = -1; // fake or actual fd for anonymous file 240 #ifdef __CFA_DEBUG__ 241 static bool heapBoot = 0; // detect recursion during boot 242 #endif // __CFA_DEBUG__ 243 244 245 // Size of array must harmonize with NoBucketSizes and individual bucket sizes must be multiple of 16. 246 // Smaller multiples of 16 and powers of 2 are common allocation sizes, so make them generate the minimum required bucket size. 
247 // malloc(0) returns 0p, so no bucket is necessary for 0 bytes returning an address that can be freed. 248 static const unsigned int bucketSizes[] @= { // different bucket sizes 249 16 + sizeof(Heap.Storage), 32 + sizeof(Heap.Storage), 48 + sizeof(Heap.Storage), 64 + sizeof(Heap.Storage), // 4 250 96 + sizeof(Heap.Storage), 112 + sizeof(Heap.Storage), 128 + sizeof(Heap.Storage), // 3 251 160, 192, 224, 256 + sizeof(Heap.Storage), // 4 252 320, 384, 448, 512 + sizeof(Heap.Storage), // 4 253 640, 768, 896, 1_024 + sizeof(Heap.Storage), // 4 254 1_536, 2_048 + sizeof(Heap.Storage), // 2 255 2_560, 3_072, 3_584, 4_096 + sizeof(Heap.Storage), // 4 256 6_144, 8_192 + sizeof(Heap.Storage), // 2 257 9_216, 10_240, 11_264, 12_288, 13_312, 14_336, 15_360, 16_384 + sizeof(Heap.Storage), // 8 258 18_432, 20_480, 22_528, 24_576, 26_624, 28_672, 30_720, 32_768 + sizeof(Heap.Storage), // 8 259 36_864, 40_960, 45_056, 49_152, 53_248, 57_344, 61_440, 65_536 + sizeof(Heap.Storage), // 8 260 73_728, 81_920, 90_112, 98_304, 106_496, 114_688, 122_880, 131_072 + sizeof(Heap.Storage), // 8 261 147_456, 163_840, 180_224, 196_608, 212_992, 229_376, 245_760, 262_144 + sizeof(Heap.Storage), // 8 262 294_912, 327_680, 360_448, 393_216, 425_984, 458_752, 491_520, 524_288 + sizeof(Heap.Storage), // 8 263 655_360, 786_432, 917_504, 1_048_576 + sizeof(Heap.Storage), // 4 264 1_179_648, 1_310_720, 1_441_792, 1_572_864, 1_703_936, 1_835_008, 1_966_080, 2_097_152 + sizeof(Heap.Storage), // 8 265 2_621_440, 3_145_728, 3_670_016, 4_194_304 + sizeof(Heap.Storage), // 4 266 }; 267 268 static_assert( NoBucketSizes == sizeof(bucketSizes) / sizeof(bucketSizes[0] ), "size of bucket array wrong" ); 269 270 // The constructor for heapManager is called explicitly in memory_startup. 
271 static Heap heapManager __attribute__(( aligned (128) )) @= {}; // size of cache line to prevent false sharing 272 273 274 //####################### Memory Allocation Routines Helpers #################### 275 276 277 #ifdef __CFA_DEBUG__ 278 static size_t allocUnfreed; // running total of allocations minus frees 279 280 static void prtUnfreed() { 281 if ( allocUnfreed != 0 ) { 282 // DO NOT USE STREAMS AS THEY MAY BE UNAVAILABLE AT THIS POINT. 283 char helpText[512]; 284 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 285 "CFA warning (UNIX pid:%ld) : program terminating with %zu(0x%zx) bytes of storage allocated but not freed.\n" 286 "Possible cause is unfreed storage allocated by the program or system/library routines called from the program.\n", 287 (long int)getpid(), allocUnfreed, allocUnfreed ); // always print the UNIX pid 288 } // if 289 } // prtUnfreed 290 291 extern int cfa_main_returned; // from interpose.cfa 292 extern "C" { 293 void heapAppStart() { // called by __cfaabi_appready_startup 294 allocUnfreed = 0; 295 } // heapAppStart 296 297 void heapAppStop() { // called by __cfaabi_appready_startdown 298 fclose( stdin ); fclose( stdout ); 299 if ( cfa_main_returned ) prtUnfreed(); // do not check unfreed storage if exit called 300 } // heapAppStop 301 } // extern "C" 302 #endif // __CFA_DEBUG__ 303 304 305 #ifdef __STATISTICS__ 310 306 static HeapStatistics stats; // zero filled 311 307 static unsigned int sbrk_calls; … … 387 383 388 384 385 // statically allocated variables => zero filled. 
386 static size_t heapExpand; // sbrk advance 387 static size_t mmapStart; // cross over point for mmap 388 static unsigned int maxBucketsUsed; // maximum number of buckets in use 389 // extern visibility, used by runtime kernel 390 size_t __page_size; // architecture pagesize 391 int __map_prot; // common mmap/mprotect protection 392 393 389 394 // thunk problem 390 395 size_t Bsearchl( unsigned int key, const unsigned int * vals, size_t dim ) { … … 490 495 } else { 491 496 fakeHeader( header, alignment ); 492 if ( unlikely( MmappedBit( header ) ) ) { 493 assert( addr < heapBegin || heapEnd < addr );497 if ( unlikely( MmappedBit( header ) ) ) { // mmapped ? 498 verify( addr < heapBegin || heapEnd < addr ); 494 499 size = ClearStickyBits( header->kind.real.blockSize ); // mmap size 495 500 return true; … … 503 508 checkHeader( header < (Heap.Storage.Header *)heapBegin || (Heap.Storage.Header *)heapEnd < header, name, addr ); // bad address ? (offset could be + or -) 504 509 505 if ( freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) { 506 abort( "Attempt to %s storage %p with corrupted header.\n" 507 "Possible cause is duplicate free on same block or overwriting of header information.", 508 name, addr ); 509 } // if 510 Heap * homeManager; 511 if ( unlikely( freeHead == 0p || // freed and only free-list node => null link 512 // freed and link points at another free block not to a bucket in the bucket array. 513 freeHead < &freeLists[0] || &freeLists[NoBucketSizes] <= freeHead ) ) { 514 abort( "**** Error **** attempt to %s storage %p with corrupted header.\n" 515 "Possible cause is duplicate free on same block or overwriting of header information.", 516 name, addr ); 517 } // if 510 518 #endif // __CFA_DEBUG__ 511 519 … … 560 568 sbrk_storage += increase; 561 569 #endif // __STATISTICS__ 570 562 571 #ifdef __CFA_DEBUG__ 563 572 // Set new memory to garbage so subsequent uninitialized usages might fail. 
… … 565 574 //Memset( (char *)heapEnd + heapRemaining, increase ); 566 575 #endif // __CFA_DEBUG__ 576 567 577 rem = heapRemaining + increase - size; 568 578 } // if … … 651 661 __atomic_add_fetch( &allocUnfreed, tsize, __ATOMIC_SEQ_CST ); 652 662 if ( traceHeap() ) { 653 enum { BufferSize = 64 }; 654 char helpText[BufferSize]; 655 int len = snprintf( helpText, BufferSize, "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); 656 __cfaabi_bits_write( STDERR_FILENO, helpText, len ); // print debug/nodebug 663 char helpText[64]; 664 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 665 "%p = Malloc( %zu ) (allocated %zu)\n", addr, size, tsize ); // print debug/nodebug 657 666 } // if 658 667 #endif // __CFA_DEBUG__ … … 711 720 if ( traceHeap() ) { 712 721 char helpText[64]; 713 int len = snprintf( helpText, sizeof(helpText), "Free( %p ) size:%zu\n", addr, size );714 __cfaabi_bits_write( STDERR_FILENO, helpText, len); // print debug/nodebug722 __cfaabi_bits_print_buffer( STDERR_FILENO, helpText, sizeof(helpText), 723 "Free( %p ) size:%zu\n", addr, size ); // print debug/nodebug 715 724 } // if 716 725 #endif // __CFA_DEBUG__
Note: See TracChangeset for help on using the changeset viewer.